repo_name | path | copies | size | content | license
---|---|---|---|---|---|
GitYiheng/reinforcement_learning_test | test01_cartpendulum/t01_qlearning.py | 1 | 4374 | from __future__ import print_function, division
from builtins import range
import gym
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
class SGDRegressor:
def __init__(self, D):
self.w = np.random.randn(D) / np.sqrt(D)
self.lr = 10e-2
def partial_fit(self, X, Y):
self.w += self.lr*(Y - X.dot(self.w)).dot(X)
def predict(self, X):
return X.dot(self.w)
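# Illustrative sketch (editorial note, not part of the original script): partial_fit
# performs one gradient-descent step on the squared error 0.5*||Y - X w||^2, so
# repeated calls on a fixed batch converge to the least-squares solution, e.g.
#
#   reg = SGDRegressor(D=2)
#   X_toy = np.array([[1.0, 0.0], [0.0, 1.0]])
#   Y_toy = np.array([3.0, -2.0])
#   for _ in range(200):
#       reg.partial_fit(X_toy, Y_toy)
#   # reg.w is now close to [3.0, -2.0]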
class FeatureTransformer:
def __init__(self, env):
# observation_examples = np.array([env.observation_space.sample() for x in range(10000)])
# NOTE!! state samples are poor, b/c you get velocities --> infinity
observation_examples = np.random.random((20000, 4))*2 - 1
scaler = StandardScaler()
scaler.fit(observation_examples)
# Used to convert a state to a featurized representation.
# We use RBF kernels with different variances to cover different parts of the space
featurizer = FeatureUnion([
("rbf1", RBFSampler(gamma=0.05, n_components=1000)),
("rbf2", RBFSampler(gamma=1.0, n_components=1000)),
("rbf3", RBFSampler(gamma=0.5, n_components=1000)),
("rbf4", RBFSampler(gamma=0.1, n_components=1000))
])
feature_examples = featurizer.fit_transform(scaler.transform(observation_examples))
self.dimensions = feature_examples.shape[1]
self.scaler = scaler
self.featurizer = featurizer
def transform(self, observations):
scaled = self.scaler.transform(observations)
return self.featurizer.transform(scaled)
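# Illustrative usage (editorial sketch, not part of the original script): the
# transformer maps a raw 4-dimensional CartPole observation to a
# 4 * 1000 = 4000 dimensional RBF feature vector, e.g.
#
#   env = gym.make('CartPole-v0')
#   ft = FeatureTransformer(env)
#   phi = ft.transform([[0.0, 0.1, 0.0, -0.1]])
#   # phi.shape == (1, ft.dimensions), i.e. (1, 4000)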
# Holds one SGDRegressor for each action
class Model:
def __init__(self, env, feature_transformer):
self.env = env
self.models = []
self.feature_transformer = feature_transformer
for i in range(env.action_space.n):
model = SGDRegressor(feature_transformer.dimensions)
self.models.append(model)
def predict(self, s):
X = self.feature_transformer.transform(np.atleast_2d(s))
return np.array([m.predict(X)[0] for m in self.models])
def update(self, s, a, G):
X = self.feature_transformer.transform(np.atleast_2d(s))
self.models[a].partial_fit(X, [G])
def sample_action(self, s, eps):
if np.random.random() < eps:
return self.env.action_space.sample()
else:
return np.argmax(self.predict(s))
def play_one(env, model, eps, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = model.sample_action(observation, eps)
prev_observation = observation
observation, reward, done, info = env.step(action)
if done:
reward = -200
# update the model
next = model.predict(observation)
assert(len(next.shape) == 1)
G = reward + gamma*np.max(next)
model.update(prev_observation, action, G)
if reward == 1: # only count the default +1 reward, not the -200 penalty set at termination
totalreward += reward
iters += 1
return totalreward
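# Note (editorial, not part of the original script): the update inside the loop is
# one-step Q-learning with function approximation; the target is
#   G = r + gamma * max_a' Q(s', a')
# and only the regressor for the action actually taken is moved toward G.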
def plot_running_avg(totalrewards):
N = len(totalrewards)
running_avg = np.empty(N)
for t in range(N):
running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean()
plt.plot(running_avg)
plt.title("Running Average")
plt.show()
def main():
env = gym.make('CartPole-v0')
ft = FeatureTransformer(env)
model = Model(env, ft)
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
N = 500
totalrewards = np.empty(N)
costs = np.empty(N)
for n in range(N):
eps = 1.0/np.sqrt(n+1)
totalreward = play_one(env, model, eps, gamma)
totalrewards[n] = totalreward
if n % 100 == 0:
print("episode:", n, "total reward:", totalreward, "eps:", eps, "avg reward (last 100):", totalrewards[max(0, n-100):(n+1)].mean())
print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
print("total steps:", totalrewards.sum())
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
plot_running_avg(totalrewards)
if __name__ == '__main__':
main()
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/metrics/tests/test_score_objects.py | 2 | 13890 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
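# For illustration (editorial note, not part of the original tests): each of these
# names maps to a scorer object via get_scorer, e.g.
#   get_scorer('accuracy')(clf, X_test, y_test)
# is equivalent to accuracy_score(y_test, clf.predict(X_test)).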
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
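# For illustration (editorial note, not part of the original tests): a scorer is any
# callable with the signature scorer(estimator, X, y) returning a float, so an
# instance of DummyScorer can be passed anywhere a scoring object is accepted, e.g.
#   cross_val_score(clf, X, y, scoring=DummyScorer())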
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorers work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| mit |
nityas/6869-finalproject | src/ann.py | 1 | 3260 | import sys
import numpy
try:
from sklearn import datasets
except:
print("scikit-learn is required to run this example.")
exit(1)
try:
from openann import *
except:
print("OpenANN Python bindings are not installed!")
exit(1)
#NOTE: LABELS ARE 0-INDEXED, UNLIKE WITH LOGISTIC REGRESSION
HOG_TRAINING_DATA = 'data/hog_training_data.npy'
HOG_TRAINING_LABELS = 'data/hog_training_labels.npy'
HOG_TESTING_DATA = 'data/hog_testing_data.npy'
HOG_TESTING_LABELS = 'data/hog_testing_labels.npy'
def print_usage():
print("Usage:")
print(" python ann.py [run]")
def run_ann():
train_labels = numpy.load(HOG_TRAINING_LABELS)
train_features = numpy.load(HOG_TRAINING_DATA)
test_labels = numpy.load(HOG_TESTING_LABELS)
test_features = numpy.load(HOG_TESTING_DATA)
total_features = numpy.concatenate((train_features, test_features), axis=0)
total_labels = numpy.concatenate((train_labels, test_labels), axis=0)
X = numpy.array(total_features)
Y = numpy.array(total_labels)
Y = Y - 1
D = X.shape[1]
F = len(numpy.unique(Y))
N = len(X)
# Preprocess data (normalization and 1-of-c encoding)
stds = X.std(axis=0)
for i in range (0, len(stds)):
if stds[i] == 0:
stds[i] = 1
X = (X - X.mean(axis=0)) / stds
T = numpy.zeros((N, F))
T[(range(N), Y)] = 1.0
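# Illustrative example (editorial note, not part of the original script): the line
# above builds a 1-of-c (one-hot) target matrix, e.g. with Y = [0, 2, 1] and F = 3,
#   T = [[1, 0, 0],
#        [0, 0, 1],
#        [0, 1, 0]]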
# Setup network
net = Net()
net.set_regularization(0.01, 0.01, 0)
net.input_layer(D)
net.fully_connected_layer(100, Activation.LOGISTIC)
net.output_layer(F, Activation.SOFTMAX)
net.set_error_function(Error.CE)
# Split dataset into training set and validation set and make sure that
# each class is equally distributed in the datasets
X1 = numpy.vstack((X[0:(N//2)]))
T1 = numpy.vstack((T[0:(N//2)]))
training_set = DataSet(X1, T1)
X2 = numpy.vstack((X[(N//2):]))
T2 = numpy.vstack((T[(N//2):]))
validation_set = DataSet(X2, T2)
# Train for 30 episodes (with tuned parameters for MBSGD)
optimizer = MBSGD({"maximal_iterations": 30}, learning_rate=0.9,
learning_rate_decay=0.999, min_learning_rate=0.001, momentum=0.5,
batch_size=128)
Log.set_info() # Deactivate debug output
optimizer.optimize(net, training_set)
print("TF data set has %d inputs, %d classes and %d examples" % (D, F, N))
print("The data has been split up into training and validation sets.")
training_percent = float(classification_hits(net, training_set)) / len(X1)
testing_percent = float(classification_hits(net, validation_set)) / len(X2)
print("Correct predictions on training set: %d/%d, and percent is: %f"
% (classification_hits(net, training_set), len(X1), training_percent))
print("Confusion matrix:")
print(confusion_matrix(net, training_set)[0])
print("Correct predictions on test set: %d/%d, and percent is: %f"
% (classification_hits(net, validation_set), len(X2), testing_percent))
print("Confusion matrix:")
print(confusion_matrix(net, validation_set)[0])
if __name__ == "__main__":
if len(sys.argv) == 1:
print_usage()
for command in sys.argv[1:]:
if command == "run":
run_ann()
else:
print_usage()
exit(1) | mit |
anielsen001/scipy | scipy/stats/tests/test_morestats.py | 4 | 54238 | # Author: Travis Oliphant, 2002
#
# Further enhancements and tests added by numerous SciPy developers.
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.random import RandomState
from numpy.testing import (TestCase, run_module_suite, assert_array_equal,
assert_almost_equal, assert_array_less, assert_array_almost_equal,
assert_raises, assert_, assert_allclose, assert_equal, dec, assert_warns)
from scipy import stats
from common_tests import check_named_results
# Matplotlib is not a scipy dependency but is optionally used in probplot, so
# check if it's available
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
g1 = [1.006, 0.996, 0.998, 1.000, 0.992, 0.993, 1.002, 0.999, 0.994, 1.000]
g2 = [0.998, 1.006, 1.000, 1.002, 0.997, 0.998, 0.996, 1.000, 1.006, 0.988]
g3 = [0.991, 0.987, 0.997, 0.999, 0.995, 0.994, 1.000, 0.999, 0.996, 0.996]
g4 = [1.005, 1.002, 0.994, 1.000, 0.995, 0.994, 0.998, 0.996, 1.002, 0.996]
g5 = [0.998, 0.998, 0.982, 0.990, 1.002, 0.984, 0.996, 0.993, 0.980, 0.996]
g6 = [1.009, 1.013, 1.009, 0.997, 0.988, 1.002, 0.995, 0.998, 0.981, 0.996]
g7 = [0.990, 1.004, 0.996, 1.001, 0.998, 1.000, 1.018, 1.010, 0.996, 1.002]
g8 = [0.998, 1.000, 1.006, 1.000, 1.002, 0.996, 0.998, 0.996, 1.002, 1.006]
g9 = [1.002, 0.998, 0.996, 0.995, 0.996, 1.004, 1.004, 0.998, 0.999, 0.991]
g10 = [0.991, 0.995, 0.984, 0.994, 0.997, 0.997, 0.991, 0.998, 1.004, 0.997]
class TestBayes_mvs(TestCase):
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.1036502226125329, 10.896349777387467),
rtol=1e-14)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
class TestMvsdist(TestCase):
def test_basic(self):
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.mvsdist(data)
assert_almost_equal(mean.mean(), 9.0)
assert_allclose(mean.interval(0.9), (7.1036502226125329,
10.896349777387467), rtol=1e-14)
assert_almost_equal(var.mean(), 10.0)
assert_allclose(var.interval(0.9), (3.1767242068607087,
24.45910381334018), rtol=1e-09)
assert_almost_equal(std.mean(), 2.9724954732045084, decimal=14)
assert_allclose(std.interval(0.9), (1.7823367265645145,
4.9456146050146312), rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.mvsdist, [])
def test_bad_arg(self):
# Raise ValueError if fewer than two data points are given.
data = [1]
assert_raises(ValueError, stats.mvsdist, data)
def test_warns(self):
# regression test for gh-5270
# make sure there are no spurious divide-by-zero warnings
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
[x.mean() for x in stats.mvsdist([1, 2, 3])]
[x.mean() for x in stats.mvsdist([1, 2, 3, 4, 5])]
class TestShapiro(TestCase):
def test_basic(self):
x1 = [0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75, 0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10, 0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
# Verified against R
np.random.seed(12345678)
x3 = stats.norm.rvs(loc=5, scale=3, size=100)
w, pw = stats.shapiro(x3)
assert_almost_equal(w, 0.9772805571556091, decimal=6)
assert_almost_equal(pw, 0.08144091814756393, decimal=3)
# Extracted from original paper
x4 = [0.139, 0.157, 0.175, 0.256, 0.344, 0.413, 0.503, 0.577, 0.614,
0.655, 0.954, 1.392, 1.557, 1.648, 1.690, 1.994, 2.174, 2.206,
3.245, 3.510, 3.571, 4.354, 4.980, 6.084, 8.351]
W_expected = 0.83467
p_expected = 0.000914
w, pw = stats.shapiro(x4)
assert_almost_equal(w, W_expected, decimal=4)
assert_almost_equal(pw, p_expected, decimal=5)
def test_2d(self):
x1 = [[0.11, 7.87, 4.61, 10.14, 7.95, 3.14, 0.46,
4.43, 0.21, 4.75], [0.71, 1.52, 3.24,
0.93, 0.42, 4.97, 9.53, 4.55, 0.47, 6.66]]
w, pw = stats.shapiro(x1)
assert_almost_equal(w, 0.90047299861907959, 6)
assert_almost_equal(pw, 0.042089745402336121, 6)
x2 = [[1.36, 1.14, 2.92, 2.55, 1.46, 1.06, 5.27, -1.11,
3.48, 1.10], [0.88, -0.51, 1.46, 0.52, 6.20, 1.69,
0.08, 3.67, 2.81, 3.49]]
w, pw = stats.shapiro(x2)
assert_almost_equal(w, 0.9590270, 6)
assert_almost_equal(pw, 0.52460, 3)
def test_empty_input(self):
assert_raises(ValueError, stats.shapiro, [])
assert_raises(ValueError, stats.shapiro, [[], [], []])
def test_not_enough_values(self):
assert_raises(ValueError, stats.shapiro, [1, 2])
assert_raises(ValueError, stats.shapiro, [[], [2]])
def test_bad_arg(self):
# Length of x is less than 3.
x = [1]
assert_raises(ValueError, stats.shapiro, x)
def test_nan_input(self):
x = np.arange(10.)
x[9] = np.nan
w, pw = stats.shapiro(x)
assert_equal(w, np.nan)
assert_almost_equal(pw, 1.0)
class TestAnderson(TestCase):
def test_normal(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1)
assert_array_less(crit[:-1], A)
A, crit, sig = stats.anderson(x2)
assert_array_less(A, crit[-2:])
v = np.ones(10)
v[0] = 0
A, crit, sig = stats.anderson(v)
# The expected statistic 3.208057 was computed independently of scipy.
# For example, in R:
# > library(nortest)
# > v <- rep(1, 10)
# > v[1] <- 0
# > result <- ad.test(v)
# > result$statistic
# A
# 3.208057
assert_allclose(A, 3.208057)
def test_expon(self):
rs = RandomState(1234567890)
x1 = rs.standard_exponential(size=50)
x2 = rs.standard_normal(size=50)
A, crit, sig = stats.anderson(x1, 'expon')
assert_array_less(A, crit[-2:])
olderr = np.seterr(all='ignore')
try:
A, crit, sig = stats.anderson(x2, 'expon')
finally:
np.seterr(**olderr)
assert_(A > crit[-1])
def test_gumbel(self):
# Regression test for gh-6306. Before that issue was fixed,
# this case would return a2=inf.
v = np.ones(100)
v[0] = 0.0
a2, crit, sig = stats.anderson(v, 'gumbel')
# A brief reimplementation of the calculation of the statistic.
n = len(v)
xbar, s = stats.gumbel_l.fit(v)
logcdf = stats.gumbel_l.logcdf(v, xbar, s)
logsf = stats.gumbel_l.logsf(v, xbar, s)
i = np.arange(1, n+1)
expected_a2 = -n - np.mean((2*i - 1) * (logcdf + logsf[::-1]))
assert_allclose(a2, expected_a2)
def test_bad_arg(self):
assert_raises(ValueError, stats.anderson, [1], dist='plate_of_shrimp')
def test_result_attributes(self):
rs = RandomState(1234567890)
x = rs.standard_exponential(size=50)
res = stats.anderson(x)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
def test_gumbel_l(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x = rs.gumbel(size=100)
A1, crit1, sig1 = stats.anderson(x, 'gumbel')
A2, crit2, sig2 = stats.anderson(x, 'gumbel_l')
assert_allclose(A2, A1)
def test_gumbel_r(self):
# gh-2592, gh-6337
# Adds support to 'gumbel_r' and 'gumbel_l' as valid inputs for dist.
rs = RandomState(1234567890)
x1 = rs.gumbel(size=100)
x2 = np.ones(100)
A1, crit1, sig1 = stats.anderson(x1, 'gumbel_r')
A2, crit2, sig2 = stats.anderson(x2, 'gumbel_r')
assert_array_less(A1, crit1[-2:])
assert_(A2 > crit2[-1])
class TestAndersonKSamp(TestCase):
def test_example1a(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
assert_warns(UserWarning, stats.anderson_ksamp, (t1, t2, t3, t4),
midrank=False)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
assert_almost_equal(Tk, 4.449, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0021, 4)
def test_example1b(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass arrays
t1 = np.array([38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0])
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4), midrank=True)
assert_almost_equal(Tk, 4.480, 3)
assert_array_almost_equal([0.4985, 1.3237, 1.9158, 2.4930, 3.2459],
tm, 4)
assert_almost_equal(p, 0.0020, 4)
def test_example2a(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
# Pass lists instead of arrays
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=False)
assert_almost_equal(Tk, 3.288, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_example2b(self):
# Example data taken from an earlier technical report of
# Scholz and Stephens
t1 = [194, 15, 41, 29, 33, 181]
t2 = [413, 14, 58, 37, 100, 65, 9, 169, 447, 184, 36, 201, 118]
t3 = [34, 31, 18, 18, 67, 57, 62, 7, 22, 34]
t4 = [90, 10, 60, 186, 61, 49, 14, 24, 56, 20, 79, 84, 44, 59, 29,
118, 25, 156, 310, 76, 26, 44, 23, 62]
t5 = [130, 208, 70, 101, 208]
t6 = [74, 57, 48, 29, 502, 12, 70, 21, 29, 386, 59, 27]
t7 = [55, 320, 56, 104, 220, 239, 47, 246, 176, 182, 33]
t8 = [23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95]
t9 = [97, 51, 11, 4, 141, 18, 142, 68, 77, 80, 1, 16, 106, 206, 82,
54, 31, 216, 46, 111, 39, 63, 18, 191, 18, 163, 24]
t10 = [50, 44, 102, 72, 22, 39, 3, 15, 197, 188, 79, 88, 46, 5, 5, 36,
22, 139, 210, 97, 30, 23, 13, 14]
t11 = [359, 9, 12, 270, 603, 3, 104, 2, 438]
t12 = [50, 254, 5, 283, 35, 12]
t13 = [487, 18, 100, 7, 98, 5, 85, 91, 43, 230, 3, 130]
t14 = [102, 209, 14, 57, 54, 32, 67, 59, 134, 152, 27, 14, 230, 66,
61, 34]
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
Tk, tm, p = stats.anderson_ksamp((t1, t2, t3, t4, t5, t6, t7, t8,
t9, t10, t11, t12, t13, t14),
midrank=True)
assert_almost_equal(Tk, 3.294, 3)
assert_array_almost_equal([0.5990, 1.3269, 1.8052, 2.2486, 2.8009],
tm, 4)
assert_almost_equal(p, 0.0041, 4)
def test_not_enough_samples(self):
assert_raises(ValueError, stats.anderson_ksamp, np.ones(5))
def test_no_distinct_observations(self):
assert_raises(ValueError, stats.anderson_ksamp,
(np.ones(5), np.ones(5)))
def test_empty_sample(self):
assert_raises(ValueError, stats.anderson_ksamp, (np.ones(5), []))
def test_result_attributes(self):
# Example data from Scholz & Stephens (1987), originally
# published in Lehmann (1995, Nonparametrics, Statistical
# Methods Based on Ranks, p. 309)
# Pass a mixture of lists and arrays
t1 = [38.7, 41.5, 43.8, 44.5, 45.5, 46.0, 47.7, 58.0]
t2 = np.array([39.2, 39.3, 39.7, 41.4, 41.8, 42.9, 43.3, 45.8])
t3 = np.array([34.0, 35.0, 39.0, 40.0, 43.0, 43.0, 44.0, 45.0])
t4 = np.array([34.0, 34.8, 34.8, 35.4, 37.2, 37.8, 41.2, 42.8])
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message='approximate p-value')
res = stats.anderson_ksamp((t1, t2, t3, t4), midrank=False)
attributes = ('statistic', 'critical_values', 'significance_level')
check_named_results(res, attributes)
class TestAnsari(TestCase):
def test_small(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
W, pval = stats.ansari(x, y)
assert_almost_equal(W, 23.5, 11)
assert_almost_equal(pval, 0.13499256881897437, 11)
def test_approx(self):
ramsay = np.array((111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
101, 96, 97, 102, 107, 113, 116, 113, 110, 98))
parekh = np.array((107, 108, 106, 98, 105, 103, 110, 105, 104,
100, 96, 108, 103, 104, 114, 114, 113, 108,
106, 99))
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message="Ties preclude use of exact statistic.")
W, pval = stats.ansari(ramsay, parekh)
assert_almost_equal(W, 185.5, 11)
assert_almost_equal(pval, 0.18145819972867083, 11)
def test_exact(self):
W, pval = stats.ansari([1, 2, 3, 4], [15, 5, 20, 8, 10, 12])
assert_almost_equal(W, 10.0, 11)
assert_almost_equal(pval, 0.533333333333333333, 7)
def test_bad_arg(self):
assert_raises(ValueError, stats.ansari, [], [1])
assert_raises(ValueError, stats.ansari, [1], [])
def test_result_attributes(self):
x = [1, 2, 3, 3, 4]
y = [3, 2, 6, 1, 6, 1, 4, 1]
with warnings.catch_warnings(record=True): # Ties preclude use ...
res = stats.ansari(x, y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBartlett(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
T, pval = stats.bartlett(*args)
assert_almost_equal(T, 20.78587342806484, 7)
assert_almost_equal(pval, 0.0136358632781, 7)
def test_bad_arg(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.bartlett, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.bartlett(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_empty_arg(self):
args = (g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, [])
assert_equal((np.nan, np.nan), stats.bartlett(*args))
class TestLevene(TestCase):
def test_data(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
W, pval = stats.levene(*args)
assert_almost_equal(W, 1.7059176930008939, 7)
assert_almost_equal(pval, 0.0990829755522, 7)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
W1, pval1 = stats.levene(g1, g2, g3, center='mean')
W2, pval2 = stats.levene(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
np.random.seed(1234)
x2 = np.random.permutation(x)
# Use center='trimmed'
W0, pval0 = stats.levene(x, y, center='trimmed',
proportiontocut=0.125)
W1, pval1 = stats.levene(x2, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
W2, pval2 = stats.levene(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(W0, W2)
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_equal_mean_median(self):
x = np.linspace(-1, 1, 21)
np.random.seed(1234)
x2 = np.random.permutation(x)
y = x**3
W1, pval1 = stats.levene(x, y, center='mean')
W2, pval2 = stats.levene(x2, y, center='median')
assert_almost_equal(W1, W2)
assert_almost_equal(pval1, pval2)
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.levene, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.levene, x, x, center='trim')
def test_too_few_args(self):
assert_raises(ValueError, stats.levene, [1])
def test_result_attributes(self):
args = [g1, g2, g3, g4, g5, g6, g7, g8, g9, g10]
res = stats.levene(*args)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
class TestBinomP(TestCase):
def test_data(self):
pval = stats.binom_test(100, 250)
assert_almost_equal(pval, 0.0018833009350757682, 11)
pval = stats.binom_test(201, 405)
assert_almost_equal(pval, 0.92085205962670713, 11)
pval = stats.binom_test([682, 243], p=3.0/4)
assert_almost_equal(pval, 0.38249155957481695, 11)
def test_bad_len_x(self):
# Length of x must be 1 or 2.
assert_raises(ValueError, stats.binom_test, [1, 2, 3])
def test_bad_n(self):
# len(x) is 1, but n is invalid.
# Missing n
assert_raises(ValueError, stats.binom_test, [100])
# n less than x[0]
assert_raises(ValueError, stats.binom_test, [100], n=50)
def test_bad_p(self):
assert_raises(ValueError, stats.binom_test, [50, 50], p=2.0)
def test_alternatives(self):
res = stats.binom_test(51, 235, p=1./6, alternative='less')
assert_almost_equal(res, 0.982022657605858)
res = stats.binom_test(51, 235, p=1./6, alternative='greater')
assert_almost_equal(res, 0.02654424571169085)
res = stats.binom_test(51, 235, p=1./6, alternative='two-sided')
assert_almost_equal(res, 0.0437479701823997)
class TestFligner(TestCase):
def test_data(self):
# numbers from R: fligner.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.fligner(x1, x1**2),
(3.2282229927203536, 0.072379187848207877),
11)
def test_trimmed1(self):
# Test that center='trimmed' gives the same result as center='mean'
# when proportiontocut=0.
Xsq1, pval1 = stats.fligner(g1, g2, g3, center='mean')
Xsq2, pval2 = stats.fligner(g1, g2, g3, center='trimmed',
proportiontocut=0.0)
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
def test_trimmed2(self):
x = [1.2, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100.0]
y = [0.0, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 200.0]
# Use center='trimmed'
Xsq1, pval1 = stats.fligner(x, y, center='trimmed',
proportiontocut=0.125)
# Trim the data here, and use center='mean'
Xsq2, pval2 = stats.fligner(x[1:-1], y[1:-1], center='mean')
# Result should be the same.
assert_almost_equal(Xsq1, Xsq2)
assert_almost_equal(pval1, pval2)
# The following test looks reasonable at first, but fligner() uses the
# function stats.rankdata(), and in one of the cases in this test,
# there are ties, while in the other (because of normal rounding
# errors) there are not. This difference leads to differences in the
# third significant digit of W.
#
#def test_equal_mean_median(self):
# x = np.linspace(-1,1,21)
# y = x**3
# W1, pval1 = stats.fligner(x, y, center='mean')
# W2, pval2 = stats.fligner(x, y, center='median')
# assert_almost_equal(W1, W2)
# assert_almost_equal(pval1, pval2)
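# For illustration (editorial note, not part of the original tests): rankdata
# assigns tied values the average of their ranks, e.g.
#   >>> from scipy.stats import rankdata
#   >>> rankdata([0.0, 0.0, 1.0])
#   array([1.5, 1.5, 3.])
# so whether rounding errors produce exact ties changes the ranks, and hence the
# third significant digit of the statistic, as described above.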
def test_bad_keyword(self):
x = np.linspace(-1, 1, 21)
assert_raises(TypeError, stats.fligner, x, x, portiontocut=0.1)
def test_bad_center_value(self):
x = np.linspace(-1, 1, 21)
assert_raises(ValueError, stats.fligner, x, x, center='trim')
def test_bad_num_args(self):
# Too few args raises ValueError.
assert_raises(ValueError, stats.fligner, [1])
def test_empty_arg(self):
x = np.arange(5)
assert_equal((np.nan, np.nan), stats.fligner(x, x**2, []))
class TestMood(TestCase):
def test_mood(self):
# numbers from R: mood.test in package stats
x1 = np.arange(5)
assert_array_almost_equal(stats.mood(x1, x1**2),
(-1.3830857299399906, 0.16663858066771478),
11)
def test_mood_order_of_args(self):
# z should change sign when the order of arguments changes, pvalue
# should not change
np.random.seed(1234)
x1 = np.random.randn(10, 1)
x2 = np.random.randn(15, 1)
z1, p1 = stats.mood(x1, x2)
z2, p2 = stats.mood(x2, x1)
assert_array_almost_equal([z1, p1], [-z2, p2])
def test_mood_with_axis_none(self):
# Test with axis = None, compare with results from R
x1 = [-0.626453810742332, 0.183643324222082, -0.835628612410047,
1.59528080213779, 0.329507771815361, -0.820468384118015,
0.487429052428485, 0.738324705129217, 0.575781351653492,
-0.305388387156356, 1.51178116845085, 0.389843236411431,
-0.621240580541804, -2.2146998871775, 1.12493091814311,
-0.0449336090152309, -0.0161902630989461, 0.943836210685299,
0.821221195098089, 0.593901321217509]
x2 = [-0.896914546624981, 0.184849184646742, 1.58784533120882,
-1.13037567424629, -0.0802517565509893, 0.132420284381094,
0.707954729271733, -0.23969802417184, 1.98447393665293,
-0.138787012119665, 0.417650750792556, 0.981752777463662,
-0.392695355503813, -1.03966897694891, 1.78222896030858,
-2.31106908460517, 0.878604580921265, 0.035806718015226,
1.01282869212708, 0.432265154539617, 2.09081920524915,
-1.19992581964387, 1.58963820029007, 1.95465164222325,
0.00493777682814261, -2.45170638784613, 0.477237302613617,
-0.596558168631403, 0.792203270299649, 0.289636710177348]
x1 = np.array(x1)
x2 = np.array(x2)
x1.shape = (10, 2)
x2.shape = (15, 2)
assert_array_almost_equal(stats.mood(x1, x2, axis=None),
[-1.31716607555, 0.18778296257])
def test_mood_2d(self):
# Test if the results of mood test in 2-D case are consistent with the
# R result for the same inputs. Numbers from R mood.test().
ny = 5
np.random.seed(1234)
x1 = np.random.randn(10, ny)
x2 = np.random.randn(15, ny)
z_vectest, pval_vectest = stats.mood(x1, x2)
for j in range(ny):
assert_array_almost_equal([z_vectest[j], pval_vectest[j]],
stats.mood(x1[:, j], x2[:, j]))
# inverse order of dimensions
x1 = x1.transpose()
x2 = x2.transpose()
z_vectest, pval_vectest = stats.mood(x1, x2, axis=1)
for i in range(ny):
# check axis handling is self consistent
assert_array_almost_equal([z_vectest[i], pval_vectest[i]],
stats.mood(x1[i, :], x2[i, :]))
def test_mood_3d(self):
shape = (10, 5, 6)
np.random.seed(1234)
x1 = np.random.randn(*shape)
x2 = np.random.randn(*shape)
for axis in range(3):
z_vectest, pval_vectest = stats.mood(x1, x2, axis=axis)
# Tests that result for 3-D arrays is equal to that for the
# same calculation on a set of 1-D arrays taken from the
# 3-D array
axes_idx = ([1, 2], [0, 2], [0, 1]) # the two axes != axis
for i in range(shape[axes_idx[axis][0]]):
for j in range(shape[axes_idx[axis][1]]):
if axis == 0:
slice1 = x1[:, i, j]
slice2 = x2[:, i, j]
elif axis == 1:
slice1 = x1[i, :, j]
slice2 = x2[i, :, j]
else:
slice1 = x1[i, j, :]
slice2 = x2[i, j, :]
assert_array_almost_equal([z_vectest[i, j],
pval_vectest[i, j]],
stats.mood(slice1, slice2))
def test_mood_bad_arg(self):
# Raise ValueError when the sum of the lengths of the args is
# less than 3
assert_raises(ValueError, stats.mood, [1], [])
class TestProbplot(TestCase):
def test_basic(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm, osr = stats.probplot(x, fit=False)
osm_expected = [-1.8241636, -1.38768012, -1.11829229, -0.91222575,
-0.73908135, -0.5857176, -0.44506467, -0.31273668,
-0.18568928, -0.06158146, 0.06158146, 0.18568928,
0.31273668, 0.44506467, 0.5857176, 0.73908135,
0.91222575, 1.11829229, 1.38768012, 1.8241636]
assert_allclose(osr, np.sort(x))
assert_allclose(osm, osm_expected)
res, res_fit = stats.probplot(x, fit=True)
res_fit_expected = [1.05361841, 0.31297795, 0.98741609]
assert_allclose(res_fit, res_fit_expected)
def test_sparams_keyword(self):
np.random.seed(123456)
x = stats.norm.rvs(size=100)
# Check that None, () and 0 (loc=0, for normal distribution) all work
# and give the same results
osm1, osr1 = stats.probplot(x, sparams=None, fit=False)
osm2, osr2 = stats.probplot(x, sparams=0, fit=False)
osm3, osr3 = stats.probplot(x, sparams=(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osm1, osm3)
assert_allclose(osr1, osr2)
assert_allclose(osr1, osr3)
# Check giving (loc, scale) params for normal distribution
osm, osr = stats.probplot(x, sparams=(), fit=False)
def test_dist_keyword(self):
np.random.seed(12345)
x = stats.norm.rvs(size=20)
osm1, osr1 = stats.probplot(x, fit=False, dist='t', sparams=(3,))
osm2, osr2 = stats.probplot(x, fit=False, dist=stats.t, sparams=(3,))
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
assert_raises(ValueError, stats.probplot, x, dist='wrong-dist-name')
assert_raises(AttributeError, stats.probplot, x, dist=[])
class custom_dist(object):
"""Some class that looks just enough like a distribution."""
def ppf(self, q):
return stats.norm.ppf(q, loc=2)
osm1, osr1 = stats.probplot(x, sparams=(2,), fit=False)
osm2, osr2 = stats.probplot(x, dist=custom_dist(), fit=False)
assert_allclose(osm1, osm2)
assert_allclose(osr1, osr2)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
np.random.seed(7654321)
fig = plt.figure()
fig.add_subplot(111)
x = stats.t.rvs(3, size=100)
res1, fitres1 = stats.probplot(x, plot=plt)
plt.close()
res2, fitres2 = stats.probplot(x, plot=None)
res3 = stats.probplot(x, fit=False, plot=plt)
plt.close()
res4 = stats.probplot(x, fit=False, plot=None)
# Check that results are consistent between combinations of `fit` and
# `plot` keywords.
assert_(len(res1) == len(res2) == len(res3) == len(res4) == 2)
assert_allclose(res1, res2)
assert_allclose(res1, res3)
assert_allclose(res1, res4)
assert_allclose(fitres1, fitres2)
# Check that a Matplotlib Axes object is accepted
fig = plt.figure()
ax = fig.add_subplot(111)
stats.probplot(x, fit=False, plot=ax)
plt.close()
def test_probplot_bad_args(self):
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.probplot, [1], dist="plate_of_shrimp")
def test_empty(self):
assert_equal(stats.probplot([], fit=False),
(np.array([]), np.array([])))
assert_equal(stats.probplot([], fit=True),
((np.array([]), np.array([])),
(np.nan, np.nan, 0.0)))
def test_array_of_size_one(self):
with np.errstate(invalid='ignore'):
assert_equal(stats.probplot([1], fit=True),
((np.array([0.]), np.array([1])),
(np.nan, np.nan, 0.0)))
def test_wilcoxon_bad_arg():
# Raise ValueError when two args of different lengths are given or
# zero_method is unknown.
assert_raises(ValueError, stats.wilcoxon, [1], [1, 2])
assert_raises(ValueError, stats.wilcoxon, [1, 2], [1, 2], "dummy")
def test_wilcoxon_arg_type():
# Should be able to accept list as arguments.
# Address issue 6070.
arr = [1, 2, 3, 0, -1, 3, 1, 2, 1, 1, 2]
_ = stats.wilcoxon(arr, zero_method="pratt")
_ = stats.wilcoxon(arr, zero_method="zsplit")
_ = stats.wilcoxon(arr, zero_method="wilcox")
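# Note (editorial, summarizing the scipy documentation; not part of the original
# tests): the zero_method options differ in how zero-differences are treated:
# "wilcox" discards them, "pratt" includes them when ranking but drops their ranks
# from the test statistic, and "zsplit" splits their ranks evenly between the
# positive and negative sums.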
class TestKstat(TestCase):
def test_moments_normal_distribution(self):
np.random.seed(32149)
data = np.random.randn(12345)
moments = []
for n in [1, 2, 3, 4]:
moments.append(stats.kstat(data, n))
expected = [0.011315, 1.017931, 0.05811052, 0.0754134]
assert_allclose(moments, expected, rtol=1e-4)
# test equivalence with `stats.moment`
m1 = stats.moment(data, moment=1)
m2 = stats.moment(data, moment=2)
m3 = stats.moment(data, moment=3)
assert_allclose((m1, m2, m3), expected[:-1], atol=0.02, rtol=1e-2)
def test_empty_input(self):
assert_raises(ValueError, stats.kstat, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstat(data), np.nan)
def test_kstat_bad_arg(self):
# Raise ValueError if n > 4 or n < 1.
data = np.arange(10)
for n in [0, 4.001]:
assert_raises(ValueError, stats.kstat, data, n=n)
class TestKstatVar(TestCase):
def test_empty_input(self):
assert_raises(ValueError, stats.kstatvar, [])
def test_nan_input(self):
data = np.arange(10.)
data[6] = np.nan
assert_equal(stats.kstatvar(data), np.nan)
def test_bad_arg(self):
# Raise ValueError if n is not 1 or 2.
data = [1]
n = 10
assert_raises(ValueError, stats.kstatvar, data, n=n)
class TestPpccPlot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
svals, ppcc = stats.ppcc_plot(self.x, -10, 10, N=N)
ppcc_expected = [0.21139644, 0.21384059, 0.98766719, 0.97980182,
0.93519298]
assert_allclose(svals, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
def test_dist(self):
# Test that we can specify distributions both by name and as objects.
svals1, ppcc1 = stats.ppcc_plot(self.x, -10, 10, dist='tukeylambda')
svals2, ppcc2 = stats.ppcc_plot(self.x, -10, 10,
dist=stats.tukeylambda)
assert_allclose(svals1, svals2, rtol=1e-20)
assert_allclose(ppcc1, ppcc2, rtol=1e-20)
# Test that 'tukeylambda' is the default dist
svals3, ppcc3 = stats.ppcc_plot(self.x, -10, 10)
assert_allclose(svals1, svals3, rtol=1e-20)
assert_allclose(ppcc1, ppcc3, rtol=1e-20)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.ppcc_plot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `b` has to be larger than `a`
assert_raises(ValueError, stats.ppcc_plot, self.x, 1, 0)
# Raise ValueError when given an invalid distribution.
assert_raises(ValueError, stats.ppcc_plot, [1, 2, 3], 0, 1,
dist="plate_of_shrimp")
def test_empty(self):
# For consistency with probplot return for one empty array,
# ppcc contains all zeros and svals is the same as for normal array
# input.
svals, ppcc = stats.ppcc_plot([], 0, 1)
assert_allclose(svals, np.linspace(0, 1, num=80))
assert_allclose(ppcc, np.zeros(80, dtype=float))
class TestPpccMax(TestCase):
def test_ppcc_max_bad_arg(self):
# Raise ValueError when given an invalid distribution.
data = [1]
assert_raises(ValueError, stats.ppcc_max, data, dist="plate_of_shrimp")
def test_ppcc_max_basic(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x), -0.71215366521264145, decimal=5)
def test_dist(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
# Test that we can specify distributions both by name and as objects.
max1 = stats.ppcc_max(x, dist='tukeylambda')
max2 = stats.ppcc_max(x, dist=stats.tukeylambda)
assert_almost_equal(max1, -0.71215366521264145, decimal=5)
assert_almost_equal(max2, -0.71215366521264145, decimal=5)
# Test that 'tukeylambda' is the default dist
max3 = stats.ppcc_max(x)
assert_almost_equal(max3, -0.71215366521264145, decimal=5)
def test_brack(self):
np.random.seed(1234567)
x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
assert_raises(ValueError, stats.ppcc_max, x, brack=(0.0, 1.0, 0.5))
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(0, 1)),
-0.71215366521264145, decimal=5)
# On Python 2.6 the result is accurate to 5 decimals. On Python >= 2.7
# it is accurate up to 16 decimals
assert_almost_equal(stats.ppcc_max(x, brack=(-2, 2)),
-0.71215366521264145, decimal=5)
class TestBoxcox_llf(TestCase):
def test_basic(self):
np.random.seed(54321)
x = stats.norm.rvs(size=10000, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf_expected = -x.size / 2. * np.log(np.sum(x.std()**2))
assert_allclose(llf, llf_expected)
def test_array_like(self):
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self):
# Note: boxcox_llf() was already working with 2-D input (sort of), so
# keep it like that. boxcox() doesn't work with 2-D input though, due
# to brent() returning a scalar.
np.random.seed(54321)
x = stats.norm.rvs(size=100, loc=10)
lmbda = 1
llf = stats.boxcox_llf(lmbda, x)
llf2 = stats.boxcox_llf(lmbda, np.vstack([x, x]).T)
assert_allclose([llf, llf], llf2, rtol=1e-12)
def test_empty(self):
assert_(np.isnan(stats.boxcox_llf(1, [])))
class TestBoxcox(TestCase):
def test_fixed_lmbda(self):
np.random.seed(12345)
x = stats.loggamma.rvs(5, size=50) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
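# Note (editorial, not part of the original tests): the expected values above
# follow from the Box-Cox definition
#   y = (x**lmbda - 1) / lmbda  for lmbda != 0,   y = log(x)  for lmbda == 0,
# e.g. lmbda = -1 gives (x**-1 - 1) / (-1) = 1 - 1/x.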
def test_lmbda_None(self):
np.random.seed(1234567)
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
np.random.seed(1245)
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
np.random.seed(1234)
x = stats.loggamma.rvs(5, size=50) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = stats.loggamma.rvs(7, size=500) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1])
assert_raises(ValueError, stats.boxcox, x)
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
class TestBoxcoxNormmax(TestCase):
def setUp(self):
np.random.seed(12345)
self.x = stats.loggamma.rvs(5, size=50) + 5
def test_pearsonr(self):
maxlog = stats.boxcox_normmax(self.x)
assert_allclose(maxlog, 1.804465, rtol=1e-6)
def test_mle(self):
maxlog = stats.boxcox_normmax(self.x, method='mle')
assert_allclose(maxlog, 1.758101, rtol=1e-6)
# Check that boxcox() uses 'mle'
_, maxlog_boxcox = stats.boxcox(self.x)
assert_allclose(maxlog_boxcox, maxlog)
def test_all(self):
maxlog_all = stats.boxcox_normmax(self.x, method='all')
assert_allclose(maxlog_all, [1.804465, 1.758101], rtol=1e-6)
class TestBoxcoxNormplot(TestCase):
def setUp(self):
np.random.seed(7654321)
self.x = stats.loggamma.rvs(5, size=500) + 5
def test_basic(self):
N = 5
lmbdas, ppcc = stats.boxcox_normplot(self.x, -10, 10, N=N)
ppcc_expected = [0.57783375, 0.83610988, 0.97524311, 0.99756057,
0.95843297]
assert_allclose(lmbdas, np.linspace(-10, 10, num=N))
assert_allclose(ppcc, ppcc_expected)
@dec.skipif(not have_matplotlib)
def test_plot_kwarg(self):
# Check with the matplotlib.pyplot module
fig = plt.figure()
fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=plt)
plt.close()
# Check that a Matplotlib Axes object is accepted
fig.add_subplot(111)
ax = fig.add_subplot(111)
stats.boxcox_normplot(self.x, -20, 20, plot=ax)
plt.close()
def test_invalid_inputs(self):
# `lb` has to be larger than `la`
assert_raises(ValueError, stats.boxcox_normplot, self.x, 1, 0)
# `x` can not contain negative values
assert_raises(ValueError, stats.boxcox_normplot, [-1, 1], 0, 1)
def test_empty(self):
assert_(stats.boxcox_normplot([], 0, 1).size == 0)
class TestCircFuncs(TestCase):
def test_circfuncs(self):
x = np.array([355, 5, 2, 359, 10, 350])
M = stats.circmean(x, high=360)
Mval = 0.167690146
assert_allclose(M, Mval, rtol=1e-7)
V = stats.circvar(x, high=360)
Vval = 42.51955609
assert_allclose(V, Vval, rtol=1e-7)
S = stats.circstd(x, high=360)
Sval = 6.520702116
assert_allclose(S, Sval, rtol=1e-7)
def test_circfuncs_small(self):
x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
M1 = x.mean()
M2 = stats.circmean(x, high=360)
assert_allclose(M2, M1, rtol=1e-5)
V1 = x.var()
V2 = stats.circvar(x, high=360)
assert_allclose(V2, V1, rtol=1e-4)
S1 = x.std()
S2 = stats.circstd(x, high=360)
assert_allclose(S2, S1, rtol=1e-4)
def test_circmean_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
M1 = stats.circmean(x, high=360)
M2 = stats.circmean(x.ravel(), high=360)
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=1)
M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(M1, M2, rtol=1e-14)
M1 = stats.circmean(x, high=360, axis=0)
M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(M1, M2, rtol=1e-14)
def test_circvar_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
V1 = stats.circvar(x, high=360)
V2 = stats.circvar(x.ravel(), high=360)
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=1)
V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(V1, V2, rtol=1e-11)
V1 = stats.circvar(x, high=360, axis=0)
V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(V1, V2, rtol=1e-11)
def test_circstd_axis(self):
x = np.array([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356]])
S1 = stats.circstd(x, high=360)
S2 = stats.circstd(x.ravel(), high=360)
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=1)
S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
assert_allclose(S1, S2, rtol=1e-11)
S1 = stats.circstd(x, high=360, axis=0)
S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
assert_allclose(S1, S2, rtol=1e-11)
def test_circfuncs_array_like(self):
x = [355, 5, 2, 359, 10, 350]
assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
def test_empty(self):
assert_(np.isnan(stats.circmean([])))
assert_(np.isnan(stats.circstd([])))
assert_(np.isnan(stats.circvar([])))
def test_accuracy_wilcoxon():
freq = [1, 4, 16, 15, 8, 4, 5, 1, 2]
nums = range(-4, 5)
x = np.concatenate([[u] * v for u, v in zip(nums, freq)])
y = np.zeros(x.size)
T, p = stats.wilcoxon(x, y, "pratt")
assert_allclose(T, 423)
assert_allclose(p, 0.00197547303533107)
T, p = stats.wilcoxon(x, y, "zsplit")
assert_allclose(T, 441)
assert_allclose(p, 0.0032145343172473055)
T, p = stats.wilcoxon(x, y, "wilcox")
assert_allclose(T, 327)
assert_allclose(p, 0.00641346115861)
# Test the 'correction' option, using values computed in R with:
# > wilcox.test(x, y, paired=TRUE, exact=FALSE, correct={FALSE,TRUE})
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
T, p = stats.wilcoxon(x, y, correction=False)
assert_equal(T, 34)
assert_allclose(p, 0.6948866, rtol=1e-6)
T, p = stats.wilcoxon(x, y, correction=True)
assert_equal(T, 34)
assert_allclose(p, 0.7240817, rtol=1e-6)
def test_wilcoxon_result_attributes():
x = np.array([120, 114, 181, 188, 180, 146, 121, 191, 132, 113, 127, 112])
y = np.array([133, 143, 119, 189, 112, 199, 198, 113, 115, 121, 142, 187])
res = stats.wilcoxon(x, y, correction=False)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes)
def test_wilcoxon_tie():
# Regression test for gh-2391.
# Corresponding R code is:
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=FALSE)
# > result$p.value
# [1] 0.001565402
# > result = wilcox.test(rep(0.1, 10), exact=FALSE, correct=TRUE)
# > result$p.value
# [1] 0.001904195
stat, p = stats.wilcoxon([0.1] * 10)
expected_p = 0.001565402
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
stat, p = stats.wilcoxon([0.1] * 10, correction=True)
expected_p = 0.001904195
assert_equal(stat, 0)
assert_allclose(p, expected_p, rtol=1e-6)
class TestMedianTest(TestCase):
def test_bad_n_samples(self):
# median_test requires at least two samples.
assert_raises(ValueError, stats.median_test, [1, 2, 3])
def test_empty_sample(self):
# Each sample must contain at least one value.
assert_raises(ValueError, stats.median_test, [], [1, 2, 3])
def test_empty_when_ties_ignored(self):
# The grand median is 1, and all values in the first argument are
# equal to the grand median. With ties="ignore", those values are
# ignored, which results in the first sample being (in effect) empty.
# This should raise a ValueError.
assert_raises(ValueError, stats.median_test,
[1, 1, 1, 1], [2, 0, 1], [2, 0], ties="ignore")
def test_empty_contingency_row(self):
# The grand median is 1, and with the default ties="below", all the
# values in the samples are counted as being below the grand median.
# This would result a row of zeros in the contingency table, which is
# an error.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1])
# With ties="above", all the values are counted as above the
# grand median.
assert_raises(ValueError, stats.median_test, [1, 1, 1], [1, 1, 1],
ties="above")
def test_bad_ties(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5],
ties="foo")
def test_bad_nan_policy(self):
assert_raises(ValueError, stats.median_test, [1, 2, 3], [4, 5], nan_policy='foobar')
def test_bad_keyword(self):
assert_raises(TypeError, stats.median_test, [1, 2, 3], [4, 5],
foo="foo")
def test_simple(self):
x = [1, 2, 3]
y = [1, 2, 3]
stat, p, med, tbl = stats.median_test(x, y)
# The median is floating point, but this equality test should be safe.
assert_equal(med, 2.0)
assert_array_equal(tbl, [[1, 1], [2, 2]])
# The expected values of the contingency table equal the contingency
# table, so the statistic should be 0 and the p-value should be 1.
assert_equal(stat, 0)
assert_equal(p, 1)
def test_ties_options(self):
# Test the contingency table calculation.
x = [1, 2, 3, 4]
y = [5, 6]
z = [7, 8, 9]
# grand median is 5.
# Default 'ties' option is "below".
stat, p, m, tbl = stats.median_test(x, y, z)
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 1, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="ignore")
assert_equal(m, 5)
assert_equal(tbl, [[0, 1, 3], [4, 0, 0]])
stat, p, m, tbl = stats.median_test(x, y, z, ties="above")
assert_equal(m, 5)
assert_equal(tbl, [[0, 2, 3], [4, 0, 0]])
def test_nan_policy_options(self):
x = [1, 2, np.nan]
y = [4, 5, 6]
mt1 = stats.median_test(x, y, nan_policy='propagate')
s, p, m, t = stats.median_test(x, y, nan_policy='omit')
assert_equal(mt1, (np.nan, np.nan, np.nan, None))
assert_allclose(s, 0.31250000000000006)
assert_allclose(p, 0.57615012203057869)
assert_equal(m, 4.0)
assert_equal(t, np.array([[0, 2],[2, 1]]))
assert_raises(ValueError, stats.median_test, x, y, nan_policy='raise')
def test_basic(self):
# median_test calls chi2_contingency to compute the test statistic
# and p-value. Make sure it hasn't screwed up the call...
x = [1, 2, 3, 4, 5]
y = [2, 4, 6, 8]
stat, p, m, tbl = stats.median_test(x, y)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, lambda_=0)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, lambda_=0)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
stat, p, m, tbl = stats.median_test(x, y, correction=False)
assert_equal(m, 4)
assert_equal(tbl, [[1, 2], [4, 2]])
exp_stat, exp_p, dof, e = stats.chi2_contingency(tbl, correction=False)
assert_allclose(stat, exp_stat)
assert_allclose(p, exp_p)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
naoyak/Agile_Data_Code_2 | ch07/train_sklearn_model.py | 1 | 5282 | import sys, os, re
sys.path.append("lib")
import utils
import numpy as np
import sklearn
import iso8601
import datetime
print("Imports loaded...")
# Load and check the size of our training data. May take a minute.
print("Original JSON file size: {:,} Bytes".format(os.path.getsize("data/simple_flight_delay_features.jsonl")))
training_data = utils.read_json_lines_file('data/simple_flight_delay_features.jsonl')
print("Training items: {:,}".format(len(training_data))) # 5,714,008
print("Data loaded...")
# Inspect a record before we alter the records
print("Size of training data in RAM: {:,} Bytes".format(sys.getsizeof(training_data))) # 50MB
print(training_data[0])
# # Sample down our training data at first...
# sampled_training_data = training_data#np.random.choice(training_data, 1000000)
# print("Sampled items: {:,} Bytes".format(len(training_data)))
# print("Data sampled...")
# Separate our results from the rest of the data, vectorize and size up
results = [record['ArrDelay'] for record in training_data]
results_vector = np.array(results)
sys.getsizeof(results_vector) # 45,712,160 Bytes
print("Results vectorized...")
# Remove the two delay fields and the flight date from our training data
for item in training_data:
item.pop('ArrDelay', None)
item.pop('FlightDate', None)
print("ArrDelay and FlightDate removed from training data...")
# Must convert datetime strings to unix times
for item in training_data:
if isinstance(item['CRSArrTime'], str):
dt = iso8601.parse_date(item['CRSArrTime'])
unix_time = int(dt.timestamp())
item['CRSArrTime'] = unix_time
if isinstance(item['CRSDepTime'], str):
dt = iso8601.parse_date(item['CRSDepTime'])
unix_time = int(dt.timestamp())
item['CRSDepTime'] = unix_time
print("Datetimes converted to unix times...")
# Use DictVectorizer to convert feature dicts to vectors
from sklearn.feature_extraction import DictVectorizer
print("Original dimensions: [{:,}]".format(len(training_data)))
vectorizer = DictVectorizer()
training_vectors = vectorizer.fit_transform(training_data)
print("Size of DictVectorized vectors: {:,} Bytes".format(training_vectors.data.nbytes))
print("Training data vectorized...")
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
training_vectors,
results_vector,
test_size=0.1,
random_state=43
)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
print("Test train split performed...")
# Train a regressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.metrics import median_absolute_error, r2_score
print("Regressor library and metrics imported...")
regressor = LinearRegression()
print("Regressor instantiated...")
from sklearn.ensemble import GradientBoostingRegressor
regressor = GradientBoostingRegressor()
print("Swapped gradient boosting trees for linear regression!")
# Lets go back for now...
regressor = LinearRegression()
print("Swapped back to linear regression!")
regressor.fit(X_train, y_train)
print("Regressor fitted...")
predicted = regressor.predict(X_test)
print("Predictions made for X_test...")
# Definitions from http://scikit-learn.org/stable/modules/model_evaluation.html
from sklearn.metrics import median_absolute_error, r2_score
# Median absolute error is the median of all absolute differences between the target and the prediction.
# Less is better, more indicates a high error between target and prediction.
medae = median_absolute_error(y_test, predicted)
print("Median absolute error: {:.3g}".format(medae))
# R2 score is the coefficient of determination. Ranges from 0 to 1, where 1.0 is best and 0.0 is worst.
# Measures how well future samples are likely to be predicted.
r2 = r2_score(y_test, predicted)
print("r2 score: {:.3g}".format(r2))
# Plot outputs, compare actual vs predicted values
# import matplotlib.pyplot as plt
#
# plt.scatter(
# y_test,
# predicted,
# color='blue',
# linewidth=1
# )
#
# plt.xticks(())
# plt.yticks(())
#
# plt.show()
#
# Persist model using pickle
#
print("Testing model persistance...")
import pickle
project_home = os.environ["PROJECT_HOME"]
# Dump the model itself
regressor_path = "{}/data/sklearn_regressor.pkl".format(project_home)
regressor_bytes = pickle.dumps(regressor)
model_f = open(regressor_path, 'wb')
model_f.write(regressor_bytes)
# Dump the DictVectorizer that vectorizes the features
vectorizer_path = "{}/data/sklearn_vectorizer.pkl".format(project_home)
vectorizer_bytes = pickle.dumps(vectorizer)
vectorizer_f = open(vectorizer_path, 'wb')
vectorizer_f.write(vectorizer_bytes)
# Load the model itself
model_f = open(regressor_path, 'rb')
model_bytes = model_f.read()
regressor = pickle.loads(model_bytes)
# Load the DictVectorizer
vectorizer_f = open(vectorizer_path, 'rb')
vectorizer_bytes = vectorizer_f.read()
vectorizer = pickle.loads(vectorizer_bytes)
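# Illustrative sketch (not in the original script): use the reloaded
# vectorizer and model together on a single feature dict, the way a serving
# layer would. We simply reuse the first training record here.
example_features = training_data[0]
example_vector = vectorizer.transform([example_features])
example_prediction = regressor.predict(example_vector)[0]
print("Example predicted arrival delay: {:.1f} minutes".format(example_prediction))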
#
# Persist model using sklearn.externals.joblib
#
from sklearn.externals import joblib
# Dump the model and vectorizer
joblib.dump(regressor, regressor_path)
joblib.dump(vectorizer, vectorizer_path)
# Load the model and vectorizer
regressor = joblib.load(regressor_path)
vectorizer = joblib.load(vectorizer_path)
| mit |
mne-tools/mne-tools.github.io | 0.22/_downloads/433df1c5704b0eae2b3e5b8d29f9ddcd/plot_45_projectors_background.py | 9 | 22444 | # -*- coding: utf-8 -*-
"""
.. _tut-projectors-background:
Background on projectors and projections
========================================
This tutorial provides background information on projectors and Signal Space
Projection (SSP), and covers loading and saving projectors, adding and removing
projectors from Raw objects, the difference between "applied" and "unapplied"
projectors, and at what stages MNE-Python applies projectors automatically.
.. contents:: Page contents
:local:
:depth: 2
We'll start by importing the Python modules we need; we'll also define a short
function to make it easier to make several plots that look similar:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa
from scipy.linalg import svd
import mne
def setup_3d_axes():
ax = plt.axes(projection='3d')
ax.view_init(azim=-105, elev=20)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(-1, 5)
ax.set_ylim(-1, 5)
ax.set_zlim(0, 5)
return ax
###############################################################################
# What is a projection?
# ^^^^^^^^^^^^^^^^^^^^^
#
# In the most basic terms, a *projection* is an operation that converts one set
# of points into another set of points, where repeating the projection
# operation on the resulting points has no effect. To give a simple geometric
# example, imagine the point :math:`(3, 2, 5)` in 3-dimensional space. A
# projection of that point onto the :math:`x, y` plane looks a lot like a
# shadow cast by that point if the sun were directly above it:
ax = setup_3d_axes()
# plot the vector (3, 2, 5)
origin = np.zeros((3, 1))
point = np.array([[3, 2, 5]]).T
vector = np.hstack([origin, point])
ax.plot(*vector, color='k')
ax.plot(*point, color='k', marker='o')
# project the vector onto the x,y plane and plot it
xy_projection_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 0]])
projected_point = xy_projection_matrix @ point
projected_vector = xy_projection_matrix @ vector
ax.plot(*projected_vector, color='C0')
ax.plot(*projected_point, color='C0', marker='o')
# add dashed arrow showing projection
arrow_coords = np.concatenate([point, projected_point - point]).flatten()
ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1, color='C1',
linewidth=1, linestyle='dashed')
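# Quick numerical check (an illustrative addition, not part of the original
# tutorial): a projection matrix is idempotent, so applying it to an
# already-projected point changes nothing.
print(np.allclose(xy_projection_matrix @ xy_projection_matrix,
                  xy_projection_matrix))  # True
print(np.allclose(xy_projection_matrix @ projected_point, projected_point))  # True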
###############################################################################
#
# .. note::
#
# The ``@`` symbol indicates matrix multiplication on NumPy arrays, and was
# introduced in Python 3.5 / NumPy 1.10. The notation ``plot(*point)`` uses
# Python `argument expansion`_ to "unpack" the elements of ``point`` into
# separate positional arguments to the function. In other words,
# ``plot(*point)`` expands to ``plot(3, 2, 5)``.
#
# Notice that we used matrix multiplication to compute the projection of our
# point :math:`(3, 2, 5)` onto the :math:`x, y` plane:
#
# .. math::
#
# \left[
# \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix}
# \right]
# \left[ \begin{matrix} 3 \\ 2 \\ 5 \end{matrix} \right] =
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right]
#
# ...and that applying the projection again to the result just gives back the
# result again:
#
# .. math::
#
# \left[
# \begin{matrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 0 \end{matrix}
# \right]
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right] =
# \left[ \begin{matrix} 3 \\ 2 \\ 0 \end{matrix} \right]
#
# From an information perspective, this projection has taken the point
# :math:`x, y, z` and removed the information about how far in the :math:`z`
# direction our point was located; all we know now is its position in the
# :math:`x, y` plane. Moreover, applying our projection matrix to *any point*
# in :math:`x, y, z` space will reduce it to a corresponding point on the
# :math:`x, y` plane. The term for this is a *subspace*: the projection matrix
# projects points in the original space into a *subspace* of lower dimension
# than the original. The reason our subspace is the :math:`x,y` plane (instead
# of, say, the :math:`y,z` plane) is a direct result of the particular values
# in our projection matrix.
#
#
# Example: projection as noise reduction
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Another way to describe this "loss of information" or "projection into a
# subspace" is to say that projection reduces the rank (or "degrees of
# freedom") of the measurement — here, from 3 dimensions down to 2. On the
# other hand, if you know that measurement component in the :math:`z` direction
# is just noise due to your measurement method, and all you care about are the
# :math:`x` and :math:`y` components, then projecting your 3-dimensional
# measurement into the :math:`x, y` plane could be seen as a form of noise
# reduction.
#
# Of course, it would be very lucky indeed if all the measurement noise were
# concentrated in the :math:`z` direction; you could just discard the :math:`z`
# component without bothering to construct a projection matrix or do the matrix
# multiplication. Suppose instead that in order to take that measurement you
# had to pull a trigger on a measurement device, and the act of pulling the
# trigger causes the device to move a little. If you measure how
# trigger-pulling affects measurement device position, you could then "correct"
# your real measurements to "project out" the effect of the trigger pulling.
# Here we'll suppose that the average effect of the trigger is to move the
# measurement device by :math:`(3, -1, 1)`:
trigger_effect = np.array([[3, -1, 1]]).T
###############################################################################
# Knowing that, we can compute a plane that is orthogonal to the effect of the
# trigger (using the fact that a plane through the origin has equation
# :math:`Ax + By + Cz = 0` given a normal vector :math:`(A, B, C)`), and
# project our real measurements onto that plane.
# compute the plane orthogonal to trigger_effect
x, y = np.meshgrid(np.linspace(-1, 5, 61), np.linspace(-1, 5, 61))
A, B, C = trigger_effect
z = (-A * x - B * y) / C
# cut off the plane below z=0 (just to make the plot nicer)
mask = np.where(z >= 0)
x = x[mask]
y = y[mask]
z = z[mask]
###############################################################################
# Computing the projection matrix from the ``trigger_effect`` vector is done
# using `singular value decomposition <svd_>`_ (SVD); interested readers may
# consult the internet or a linear algebra textbook for details on this method.
# With the projection matrix in place, we can project our original vector
# :math:`(3, 2, 5)` to remove the effect of the trigger, and then plot it:
# sphinx_gallery_thumbnail_number = 2
# compute the projection matrix
U, S, V = svd(trigger_effect, full_matrices=False)
trigger_projection_matrix = np.eye(3) - U @ U.T
# project the vector onto the orthogonal plane
projected_point = trigger_projection_matrix @ point
projected_vector = trigger_projection_matrix @ vector
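# Sanity checks (an illustrative addition): the projector maps the trigger
# direction to zero and leaves only a rank-2 subspace.
print(np.allclose(trigger_projection_matrix @ trigger_effect, 0))  # True
print(np.linalg.matrix_rank(trigger_projection_matrix))            # 2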
# plot the trigger effect and its orthogonal plane
ax = setup_3d_axes()
ax.plot_trisurf(x, y, z, color='C2', shade=False, alpha=0.25)
ax.quiver3D(*np.concatenate([origin, trigger_effect]).flatten(),
arrow_length_ratio=0.1, color='C2', alpha=0.5)
# plot the original vector
ax.plot(*vector, color='k')
ax.plot(*point, color='k', marker='o')
offset = np.full((3, 1), 0.1)
ax.text(*(point + offset).flat, '({}, {}, {})'.format(*point.flat), color='k')
# plot the projected vector
ax.plot(*projected_vector, color='C0')
ax.plot(*projected_point, color='C0', marker='o')
offset = np.full((3, 1), -0.2)
ax.text(*(projected_point + offset).flat,
'({}, {}, {})'.format(*np.round(projected_point.flat, 2)),
color='C0', horizontalalignment='right')
# add dashed arrow showing projection
arrow_coords = np.concatenate([point, projected_point - point]).flatten()
ax.quiver3D(*arrow_coords, length=0.96, arrow_length_ratio=0.1,
color='C1', linewidth=1, linestyle='dashed')
###############################################################################
# Just as before, the projection matrix will map *any point* in :math:`x, y, z`
# space onto that plane, and once a point has been projected onto that plane,
# applying the projection again will have no effect. For that reason, it should
# be clear that although the projected points vary in all three :math:`x`,
# :math:`y`, and :math:`z` directions, the set of projected points have only
# two *effective* dimensions (i.e., they are constrained to a plane).
#
# .. sidebar:: Terminology
#
# In MNE-Python, the matrix used to project a raw signal into a subspace is
# usually called a :term:`projector <projector>` or a *projection
# operator* — these terms are interchangeable with the term *projection
# matrix* used above.
#
# Projections of EEG or MEG signals work in very much the same way: the point
# :math:`x, y, z` corresponds to the value of each sensor at a single time
# point, and the projection matrix varies depending on what aspects of the
# signal (i.e., what kind of noise) you are trying to project out. The only
# real difference is that instead of a single 3-dimensional point :math:`(x, y,
# z)` you're dealing with a time series of :math:`N`-dimensional "points" (one
# at each sampling time), where :math:`N` is usually in the tens or hundreds
# (depending on how many sensors your EEG/MEG system has). Fortunately, because
# projection is a matrix operation, it can be done very quickly even on signals
# with hundreds of dimensions and tens of thousands of time points.
#
#
# .. _ssp-tutorial:
#
# Signal-space projection (SSP)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We mentioned above that the projection matrix will vary depending on what
# kind of noise you are trying to project away. Signal-space projection (SSP)
# :footcite:`UusitaloIlmoniemi1997` is a way of estimating what that projection
# matrix should be, by
# comparing measurements with and without the signal of interest. For example,
# you can take additional "empty room" measurements that record activity at the
# sensors when no subject is present. By looking at the spatial pattern of
# activity across MEG sensors in an empty room measurement, you can create one
# or more :math:`N`-dimensional vector(s) giving the "direction(s)" of
# environmental noise in sensor space (analogous to the vector for "effect of
# the trigger" in our example above). SSP is also often used for removing
# heartbeat and eye movement artifacts — in those cases, instead of empty room
# recordings the direction of the noise is estimated by detecting the
# artifacts, extracting epochs around them, and averaging. See
# :ref:`tut-artifact-ssp` for examples.
#
# Once you know the noise vectors, you can create a hyperplane that is
# orthogonal
# to them, and construct a projection matrix to project your experimental
# recordings onto that hyperplane. In that way, the component of your
# measurements associated with environmental noise can be removed. Again, it
# should be clear that the projection reduces the dimensionality of your data —
# you'll still have the same number of sensor signals, but they won't all be
# *linearly independent* — but typically there are tens or hundreds of sensors
# and the noise subspace that you are eliminating has only 3-5 dimensions, so
# the loss of degrees of freedom is usually not problematic.
#
#
# Projectors in MNE-Python
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# In our example data, :ref:`SSP <ssp-tutorial>` has already been performed
# using empty room recordings, but the :term:`projectors <projector>` are
# stored alongside the raw data and have not been *applied* yet (or,
# synonymously, the projectors are not *active* yet). Here we'll load
# the :ref:`sample data <sample-dataset>` and crop it to 60 seconds; you can
# see the projectors in the output of :func:`~mne.io.read_raw_fif` below:
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60).load_data()
###############################################################################
# In MNE-Python, the environmental noise vectors are computed using `principal
# component analysis <pca_>`_, usually abbreviated "PCA", which is why the SSP
# projectors usually have names like "PCA-v1". (Incidentally, since the process
# of performing PCA uses `singular value decomposition <svd_>`_ under the hood,
# it is also common to see phrases like "projectors were computed using SVD" in
# published papers.) The projectors are stored in the ``projs`` field of
# ``raw.info``:
print(raw.info['projs'])
###############################################################################
# ``raw.info['projs']`` is an ordinary Python :class:`list` of
# :class:`~mne.Projection` objects, so you can access individual projectors by
# indexing into it. The :class:`~mne.Projection` object itself is similar to a
# Python :class:`dict`, so you can use its ``.keys()`` method to see what
# fields it contains (normally you don't need to access its properties
# directly, but you can if necessary):
first_projector = raw.info['projs'][0]
print(first_projector)
print(first_projector.keys())
###############################################################################
# The :class:`~mne.io.Raw`, :class:`~mne.Epochs`, and :class:`~mne.Evoked`
# objects all have a boolean :attr:`~mne.io.Raw.proj` attribute that indicates
# whether there are any unapplied / inactive projectors stored in the object.
# In other words, the :attr:`~mne.io.Raw.proj` attribute is ``True`` if at
# least one :term:`projector` is present and all of them are active. In
# addition, each individual projector also has a boolean ``active`` field:
print(raw.proj)
print(first_projector['active'])
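# Illustrative sketch (not part of the original tutorial): the same algebra
# used for 3-D points above scales to M/EEG data. The channel count, noise
# direction and data below are all made up for illustration only.
n_channels, n_times = 10, 1000
rng = np.random.RandomState(0)
noise_vector = rng.randn(n_channels, 1)
noise_vector /= np.linalg.norm(noise_vector)
toy_projector = np.eye(n_channels) - noise_vector @ noise_vector.T
toy_data = rng.randn(n_channels, n_times)
cleaned = toy_projector @ toy_data  # one matrix product cleans every time point
print(cleaned.shape)  # (10, 1000)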
###############################################################################
# Computing projectors
# ~~~~~~~~~~~~~~~~~~~~
#
# In MNE-Python, SSP vectors can be computed using general purpose functions
# :func:`mne.compute_proj_raw`, :func:`mne.compute_proj_epochs`, and
# :func:`mne.compute_proj_evoked`. The general assumption these functions make
# is that the data passed contains raw data, epochs or averages of the artifact
# you want to repair via projection. In practice this typically involves
# continuous raw data of empty room recordings or averaged ECG or EOG
# artifacts. A second set of high-level convenience functions is provided to
# compute projection vectors for typical use cases. This includes
# :func:`mne.preprocessing.compute_proj_ecg` and
# :func:`mne.preprocessing.compute_proj_eog` for computing the ECG and EOG
# related artifact components, respectively; see :ref:`tut-artifact-ssp` for
# examples of these uses. For computing the EEG reference signal as a
# projector, the function :func:`mne.set_eeg_reference` can be used; see
# :ref:`tut-set-eeg-ref` for more information.
#
# .. warning:: It is best to compute projectors only on channels that will be
# used (e.g., excluding bad channels). This ensures that
# projection vectors will remain ortho-normalized and that they
# properly capture the activity of interest.
#
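# A rough sketch of typical calls (here ``empty_room_raw`` stands for a
# hypothetical separate empty-room recording, and the component counts are
# illustrative rather than recommended values):
#
# .. code-block:: python3
#
#     # environmental-noise projectors from an empty-room recording
#     empty_room_projs = mne.compute_proj_raw(empty_room_raw, n_grad=3, n_mag=3)
#
#     # heartbeat projectors (also returns the detected ECG events)
#     heartbeat_projs, ecg_events = mne.preprocessing.compute_proj_ecg(
#         raw, n_grad=1, n_mag=1, n_eeg=0)
#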
#
# Visualizing the effect of projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can see the effect the projectors are having on the measured signal by
# comparing plots with and without the projectors applied. By default,
# ``raw.plot()`` will apply the projectors in the background before plotting
# (without modifying the :class:`~mne.io.Raw` object); you can control this
# with the boolean ``proj`` parameter as shown below, or you can turn them on
# and off interactively with the projectors interface, accessed via the
# :kbd:`Proj` button in the lower right corner of the plot window. Here we'll
# look at just the magnetometers, and a 2-second sample from the beginning of
# the file.
mags = raw.copy().crop(tmax=2).pick_types(meg='mag')
for proj in (False, True):
fig = mags.plot(butterfly=True, proj=proj)
fig.subplots_adjust(top=0.9)
fig.suptitle('proj={}'.format(proj), size='xx-large', weight='bold')
###############################################################################
# Additional ways of visualizing projectors are covered in the tutorial
# :ref:`tut-artifact-ssp`.
#
#
# Loading and saving projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# SSP can be used for other types of signal cleaning besides just reduction of
# environmental noise. You probably noticed two large deflections in the
# magnetometer signals in the previous plot that were not removed by the
# empty-room projectors — those are artifacts of the subject's heartbeat. SSP
# can be used to remove those artifacts as well. The sample data includes
# projectors for heartbeat noise reduction that were saved in a separate file
# from the raw data, which can be loaded with the :func:`mne.read_proj`
# function:
ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_ecg-proj.fif')
ecg_projs = mne.read_proj(ecg_proj_file)
print(ecg_projs)
###############################################################################
# There is a corresponding :func:`mne.write_proj` function that can be used to
# save projectors to disk in ``.fif`` format:
#
# .. code-block:: python3
#
# mne.write_proj('heartbeat-proj.fif', ecg_projs)
#
# .. note::
#
# By convention, MNE-Python expects projectors to be saved with a filename
# ending in ``-proj.fif`` (or ``-proj.fif.gz``), and will issue a warning
# if you forgo this recommendation.
#
#
# Adding and removing projectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Above, when we printed the ``ecg_projs`` list that we loaded from a file, it
# showed two projectors for gradiometers (the first two, marked "planar"), two
# for magnetometers (the middle two, marked "axial"), and two for EEG sensors
# (the last two, marked "eeg"). We can add them to the :class:`~mne.io.Raw`
# object using the :meth:`~mne.io.Raw.add_proj` method:
raw.add_proj(ecg_projs)
###############################################################################
# To remove projectors, there is a corresponding method
# :meth:`~mne.io.Raw.del_proj` that will remove projectors based on their index
# within the ``raw.info['projs']`` list. For the special case of replacing the
# existing projectors with new ones, use
# ``raw.add_proj(ecg_projs, remove_existing=True)``.
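#
# For example (a sketch of the calls described above):
#
# .. code-block:: python3
#
#     raw.del_proj(0)                                # remove the first projector
#     raw.add_proj(ecg_projs, remove_existing=True)  # replace all projectors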
#
# To see how the ECG projectors affect the measured signal, we can once again
# plot the data with and without the projectors applied (though remember that
# the :meth:`~mne.io.Raw.plot` method only *temporarily* applies the projectors
# for visualization, and does not permanently change the underlying data).
# We'll compare the ``mags`` variable we created above, which had only the
# empty room SSP projectors, to the data with both empty room and ECG
# projectors:
mags_ecg = raw.copy().crop(tmax=2).pick_types(meg='mag')
for data, title in zip([mags, mags_ecg], ['Without', 'With']):
fig = data.plot(butterfly=True, proj=True)
fig.subplots_adjust(top=0.9)
fig.suptitle('{} ECG projector'.format(title), size='xx-large',
weight='bold')
###############################################################################
# When are projectors "applied"?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, projectors are applied when creating :class:`epoched
# <mne.Epochs>` data from :class:`~mne.io.Raw` data, though application of the
# projectors can be *delayed* by passing ``proj=False`` to the
# :class:`~mne.Epochs` constructor. However, even when projectors have not been
# applied, the :meth:`mne.Epochs.get_data` method will return data *as if the
# projectors had been applied* (though the :class:`~mne.Epochs` object will be
# unchanged). Additionally, projectors cannot be applied if the data are not
# :ref:`preloaded <memory>`. If the data are `memory-mapped`_ (i.e., not
# preloaded), you can check the ``_projector`` attribute to see whether any
# projectors will be applied once the data is loaded in memory.
#
# Finally, when performing inverse imaging (i.e., with
# :func:`mne.minimum_norm.apply_inverse`), the projectors will be
# automatically applied. It is also possible to apply projectors manually when
# working with :class:`~mne.io.Raw`, :class:`~mne.Epochs` or
# :class:`~mne.Evoked` objects via the object's :meth:`~mne.io.Raw.apply_proj`
# method. For all instance types, you can always copy the contents of
# :samp:`{<instance>}.info['projs']` into a separate :class:`list` variable,
# use :samp:`{<instance>}.del_proj({<index of proj(s) to remove>})` to remove
# one or more projectors, and then add them back later with
# :samp:`{<instance>}.add_proj({<list containing projs>})` if desired.
#
# .. warning::
#
# Remember that once a projector is applied, it can't be un-applied, so
# during interactive / exploratory analysis it's a good idea to use the
# object's :meth:`~mne.io.Raw.copy` method before applying projectors.
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# In general, it is recommended to apply projectors when creating
# :class:`~mne.Epochs` from :class:`~mne.io.Raw` data. There are two reasons
# for this recommendation:
#
# 1. It is computationally cheaper to apply projectors to data *after* the
#    data have been reduced to just the segments of interest (the epochs)
#
# 2. If you are applying amplitude-based rejection criteria to epochs, it is
# preferable to reject based on the signal *after* projectors have been
# applied, because the projectors may reduce noise in some epochs to
# tolerable levels (thereby increasing the number of acceptable epochs and
#    consequently increasing statistical power in any later analyses).
#
#
# References
# ^^^^^^^^^^
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`argument expansion`:
# https://docs.python.org/3/tutorial/controlflow.html#tut-unpacking-arguments
# .. _`pca`: https://en.wikipedia.org/wiki/Principal_component_analysis
# .. _`svd`: https://en.wikipedia.org/wiki/Singular_value_decomposition
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/axes_zoom_effect.py | 3 | 3293 | from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
loc1a, loc2a, loc1b, loc2b,
prop_lines, prop_patches=None):
if prop_patches is None:
prop_patches = prop_lines.copy()
prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2
c1 = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
c1.set_clip_on(False)
c2 = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
c2.set_clip_on(False)
bbox_patch1 = BboxPatch(bbox1, **prop_patches)
bbox_patch2 = BboxPatch(bbox2, **prop_patches)
p = BboxConnectorPatch(bbox1, bbox2,
#loc1a=3, loc2a=2, loc1b=4, loc2b=1,
loc1a=loc1a, loc2a=loc2a, loc1b=loc1b, loc2b=loc2b,
**prop_patches)
p.set_clip_on(False)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
u"""
ax1 : the main axes
    ax2 : the zoomed axes
(xmin,xmax) : the limits of the colored area in both plot axes.
connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will
    be marked. The keyword parameters will be used to create
patches.
"""
trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
bbox = Bbox.from_extents(xmin, 0, xmax, 1)
mybbox1 = TransformedBbox(bbox, trans1)
mybbox2 = TransformedBbox(bbox, trans2)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
u"""
ax1 : the main axes
    ax2 : the zoomed axes
Similar to zoom_effect01. The xmin & xmax will be taken from the
ax1.viewLim.
"""
tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
trans = blended_transform_factory(ax2.transData, tt)
mybbox1 = ax1.bbox
mybbox2 = TransformedBbox(ax1.viewLim, trans)
prop_patches=kwargs.copy()
prop_patches["ec"]="none"
prop_patches["alpha"]=0.2
c1, c2, bbox_patch1, bbox_patch2, p = \
connect_bbox(mybbox1, mybbox2,
loc1a=3, loc2a=2, loc1b=4, loc2b=1,
prop_lines=kwargs, prop_patches=prop_patches)
ax1.add_patch(bbox_patch1)
ax2.add_patch(bbox_patch2)
ax2.add_patch(c1)
ax2.add_patch(c2)
ax2.add_patch(p)
return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt
plt.figure(1, figsize=(5,5))
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)
plt.show()
| gpl-2.0 |
pratapvardhan/scikit-image | doc/examples/edges/plot_line_hough_transform.py | 7 | 4757 | """
=============================
Straight line Hough transform
=============================
The Hough transform in its simplest form is a `method to detect straight lines
<http://en.wikipedia.org/wiki/Hough_transform>`__.
In the following example, we construct an image with a line intersection. We
then use the Hough transform to explore a parameter space for straight lines
that may run through the image.
Algorithm overview
------------------
Usually, lines are parameterised as :math:`y = mx + c`, with a gradient
:math:`m` and y-intercept :math:`c`. However, this would mean that :math:`m` goes to
infinity for vertical lines. Instead, we therefore construct a segment
perpendicular to the line, leading to the origin. The line is represented by
the length of that segment, :math:`r`, and the angle it makes with the x-axis,
:math:`\theta`.
The Hough transform constructs a histogram array representing the parameter
space (i.e., an :math:`M \times N` matrix, for :math:`M` different values of
the radius and :math:`N` different values of :math:`\theta`). For each
parameter combination, :math:`r` and :math:`\theta`, we then find the number of
non-zero pixels in the input image that would fall close to the corresponding
line, and increment the array at position :math:`(r, \theta)` appropriately.
We can think of each non-zero pixel "voting" for potential line candidates. The
local maxima in the resulting histogram indicates the parameters of the most
probably lines. In our example, the maxima occur at 45 and 135 degrees,
corresponding to the normal vector angles of each line.
Another approach is the Progressive Probabilistic Hough Transform [1]_. It is
based on the assumption that using a random subset of voting points give a good
approximation to the actual result, and that lines can be extracted during the
voting process by walking along connected components. This returns the
beginning and end of each line segment, which is useful.
The function `probabilistic_hough` has three parameters: a general threshold
that is applied to the Hough accumulator, a minimum line length and the line
gap that influences line merging. In the example below, we find lines longer
than 10 with a gap less than 3 pixels.
References
----------
.. [1] C. Galamhos, J. Matas and J. Kittler,"Progressive probabilistic
Hough transform for line detection", in IEEE Computer Society
Conference on Computer Vision and Pattern Recognition, 1999.
.. [2] Duda, R. O. and P. E. Hart, "Use of the Hough Transformation to
Detect Lines and Curves in Pictures," Comm. ACM, Vol. 15,
pp. 11-15 (January, 1972)
"""
from matplotlib import cm
from skimage.transform import (hough_line, hough_line_peaks,
probabilistic_hough_line)
from skimage.feature import canny
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
# Constructing test image.
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255
image[idx, idx] = 255
# Classic straight-line Hough transform.
h, theta, d = hough_line(image)
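# Quick check (an illustrative addition, not part of the original example):
# the accumulator peaks should sit at the normal-vector angles of the two
# drawn lines, i.e. at +/-45 degrees (equivalently 45 and 135 degrees).
for _, peak_angle, peak_dist in zip(*hough_line_peaks(h, theta, d)):
    print('Peak at angle {:.1f} deg, distance {:.1f}'.format(
        np.rad2deg(peak_angle), peak_dist))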
# Generating figure 1.
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 6))
plt.tight_layout()
ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
ax0.set_axis_off()
ax1.imshow(np.log(1 + h), extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
d[-1], d[0]], cmap=cm.gray, aspect=1/1.5)
ax1.set_title('Hough transform')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
ax2.imshow(image, cmap=cm.gray)
row1, col1 = image.shape
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)
ax2.plot((0, col1), (y0, y1), '-r')
ax2.axis((0, col1, row1, 0))
ax2.set_title('Detected lines')
ax2.set_axis_off()
# Line finding using the Probabilistic Hough Transform.
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=10, line_length=5,
line_gap=3)
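# Illustrative addition: each entry of ``lines`` is a pair of endpoints
# ((x0, y0), (x1, y1)) for one detected segment.
print('Probabilistic Hough found {} line segments'.format(len(lines)))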
# Generating figure 2.
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(16, 6), sharex=True,
sharey=True)
plt.tight_layout()
ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
ax0.set_axis_off()
ax0.set_adjustable('box-forced')
ax1.imshow(edges, cmap=cm.gray)
ax1.set_title('Canny edges')
ax1.set_axis_off()
ax1.set_adjustable('box-forced')
ax2.imshow(edges * 0)
for line in lines:
p0, p1 = line
ax2.plot((p0[0], p1[0]), (p0[1], p1[1]))
row2, col2 = image.shape
ax2.axis((0, col2, row2, 0))
ax2.set_title('Probabilistic Hough')
ax2.set_axis_off()
ax2.set_adjustable('box-forced')
plt.show()
| bsd-3-clause |
appapantula/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all, so
the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
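# Quick numerical check (an illustrative addition): waveforms 1 and 2 are
# proportional, so the cosine distance between their class means is close to
# zero, while the euclidean and cityblock distances still tell them apart.
mean_1 = X[y == 0].mean(axis=0, keepdims=True)
mean_2 = X[y == 1].mean(axis=0, keepdims=True)
for metric in ("cosine", "euclidean", "cityblock"):
    d = pairwise_distances(mean_1, mean_2, metric=metric)[0, 0]
    print("Distance between class means (%s): %.3f" % (metric, d))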
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
M4573R/BuildingMachineLearningSystemsWithPython | ch11/demo_mds.py | 25 | 3724 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']
def plot_demo_1():
X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
y = np.array([0, 1, 2])
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 2 dimensions")
filename = "mds_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
iris = datasets.load_iris()
X = iris.data
y = iris.target
# MDS
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 2 dimensions")
filename = "mds_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
# PCA
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
pca = decomposition.PCA(n_components=3)
Xtrans = pca.fit(X).transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 3 dimensions")
ax.view_init(50, -35)
pca = decomposition.PCA(n_components=2)
Xtrans = pca.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 2 dimensions")
filename = "pca_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_demo_1()
plot_iris_mds()
| mit |
vibhaa/propane | scripts/propane/backbone.py | 2 | 4328 | import csv
import os.path
import numpy as np
import matplotlib.pyplot as plt
# collect stats from csv
num_pods = []
num_nodes = []
sizes_raw = []
sizes_compressed = []
tpp_total_mean = []
tpp_build_mean = []
tpp_minimize_mean = []
tpp_order_mean = []
tpp_gen_mean = []
tpp_compress_mean = []
# read values from csv file
direct = os.path.dirname(os.path.realpath(__file__))
with open(direct + os.path.sep + 'stats-backbone.csv') as f:
r = csv.reader(f)
for row in r:
num_pods.append(row[0])
num_nodes.append(row[1])
sizes_raw.append(row[4])
sizes_compressed.append(row[5])
tpp_total_mean.append(row[10])
tpp_build_mean.append(row[13])
tpp_minimize_mean.append(row[16])
tpp_order_mean.append(row[19])
tpp_gen_mean.append(row[22])
tpp_compress_mean.append(row[25])
# remove header info, and convert type
num_pods = map(int, num_pods[1:])
num_nodes = map(int, num_nodes[1:])
sizes_raw = map(int, sizes_raw[1:])
sizes_compressed = map(int, sizes_compressed[1:])
tpp_total_mean = map(float, tpp_total_mean[1:])
tpp_build_mean = map(float, tpp_build_mean[1:])
tpp_minimize_mean = map(float, tpp_minimize_mean[1:])
tpp_order_mean = map(float, tpp_order_mean[1:])
tpp_gen_mean = map(float, tpp_gen_mean[1:])
tpp_compress_mean = map(float, tpp_compress_mean[1:])
#====================================================
#
# Stack plot of compilation times broken down by task
#
#====================================================
# stack data lowest -> highest (build, minimize, order, gen)
data = (tpp_build_mean, tpp_minimize_mean, tpp_order_mean, tpp_gen_mean)
foo = np.row_stack( data )
y_stack = np.cumsum(foo, axis=0)
# plot colors
#color1 = "#FFC09F"
#color2 = "#FFEE93"
#color3 = "#FCF5C7"
#color4 = "#A0CED9"
#color5 = "#ADF7B6"
color1 = "#828A95"
color2 = "#CEEAF7"
color3 = "#CCD7E4"
color4 = "#D5C9DF"
# color5 = "#DCB8CB"
# stacked plot showing different running times
fig = plt.figure()
plt.grid()
ax1 = fig.add_subplot(111)
ax1.fill_between(num_nodes, 0, y_stack[0,:], facecolor=color1, alpha=.7)
ax1.fill_between(num_nodes, y_stack[0,:], y_stack[1,:], facecolor=color2, alpha=.7)
ax1.fill_between(num_nodes, y_stack[1,:], y_stack[2,:], facecolor=color3)
ax1.fill_between(num_nodes, y_stack[2,:], y_stack[3,:], facecolor=color4)
ax1.set_xlabel('Routers', fontsize=35)
ax1.set_ylabel('Avg. Time / Predicate (s)', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=35)
ax1.tick_params(axis='both', which='minor', labelsize=35)
ax1.xaxis.set_ticks([0,40,80,120,160,200])
ax1.yaxis.set_ticks([5,15,25,35,45])
#ax1.set_xlim([0,1400])
#ax1.set_ylim([0,20])
# custom legend for stack color
p1 = plt.Rectangle((0, 0), 1, 1, fc=color1, alpha=.7)
p2 = plt.Rectangle((0, 0), 1, 1, fc=color2, alpha=.7)
p3 = plt.Rectangle((0, 0), 1, 1, fc=color3, alpha=.7)
p4 = plt.Rectangle((0, 0), 1, 1, fc=color4, alpha=.7)
leg_boxes = [p4, p3, p2, p1]
descrs = ["Gen/Min ABGP", "Find Preferences", "Minimize PG", "Construct PG"]
ax1.legend(leg_boxes, descrs, loc=2, fontsize=24)
fig.savefig('compilation-times-backbone.png', bbox_inches='tight')
#====================================================
#
# Size of generated vs compressed ABGP (bar)
#
#====================================================
num_nodes1 = num_nodes
num_nodes2 = map(lambda x: x, num_nodes)
sizes_raw_per = map(lambda (size,n): size/n, zip(sizes_raw, num_nodes))
sizes_compressed_per = map(lambda (size,n): size/n, zip(sizes_compressed, num_nodes))
num_nodes1 = num_nodes1[2::5]
num_nodes2 = num_nodes2[2::5]
sizes_raw_per = sizes_raw_per[2::5]
sizes_compressed_per = sizes_compressed_per[2::5]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.bar(num_nodes1, sizes_raw_per, width=5.2, color=color1, alpha=1, align='center', log=True)
ax1.bar(num_nodes2, sizes_compressed_per, width=5.2, color=color3, alpha=1, align='center',log=True)
ax1.set_xlabel('Routers', fontsize=35)
ax1.set_ylabel('ABGP Lines/Router', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=35)
ax1.tick_params(axis='both', which='minor', labelsize=35)
ax1.set_xlim([0,220])
ax1.set_ylim([0,10*10*10*10*10*10*10])
leg_boxes = [p1, p3]
descrs = ["Raw Config", "Minimized Config"]
ax1.legend(leg_boxes, descrs, loc=2, fontsize=24)
fig.savefig('config-compression-backbone.png', bbox_inches='tight') | mit |
wanatpj/h_blind | diffenerce_histogram.py | 1 | 1713 | import Image
import matplotlib.pyplot as plt
import numpy
import os
from optparse import OptionParser
from common import *
def _parse_flags():
global indir, rangeradius
parser = OptionParser()
parser.add_option("-i",
"--in",
dest="indir",
help="directory that containes images for which the difference histogram"\
+ " will be computed",
metavar="DIR")
parser.add_option("-r",
"--rangeradius",
dest="rangeradius",
help="range of the histogram",
metavar="NUMBER")
(options, args) = parser.parse_args()
if not options.indir or not options.rangeradius:
parser.error('Not all flags specified; run with --help to see the flags;')
indir = options.indir
rangeradius = int(options.rangeradius)
def extract_differences(f):
with Image.open(f) as image:
width, height = image.size
result = []
img = image.convert("L").load()
for x in range(width - 1):
for y in range(height):
result.append(img[x, y] - img[x + 1, y])
for x in range(width):
for y in range(height - 1):
result.append(img[x, y] - img[x, y + 1])
return result
def histogram_reduce(histogram, values):
for value in values:
histogram[value + 255] += 1
return histogram
def main():
global indir, rangeradius
_parse_flags()
normalize_file_names_fn = numpy.vectorize(lambda x: indir + "/" + x)
result = map_reduce(normalize_file_names_fn(os.listdir(indir)),\
extract_differences,\
histogram_reduce,
numpy.zeros(256 + 255, dtype=numpy.uint64))
plt.bar(numpy.arange(-rangeradius, rangeradius + 1),
result[255 - rangeradius : 255 + rangeradius + 1],
align='center')
plt.show()
main()
| gpl-3.0 |
v00d00dem0n/PyCrashCourse | work/ch16/highs_lows.py | 1 | 1374 | import csv
from matplotlib import pyplot as plt
from datetime import datetime as dt
# Get dates and high temps from file
#filename = 'death_valley_2014.csv'
#filename = 'sitka_weather_07-2014.csv'
#filename = 'sitka_weather_2014.csv'
filename = 'denver_weather_2001.csv'
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
print(header_row)
for index, column_header in enumerate(header_row):
print(index, column_header.lstrip())
dates, highs, lows = [], [], []
for row in reader:
try:
current_date = dt.strptime(row[0], "%Y-%m-%d")
high = int(row[1])
low = int(row[3])
except ValueError:
print(current_date, 'missing data')
else:
dates.append(current_date)
highs.append(high)
lows.append(low)
# Plot data.
fig = plt.figure(dpi=128, figsize=(10,6))
plt.plot(dates,highs, c='red')
plt.plot(dates,lows, c='blue')
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format plot graph.
plt.title("Compare Daily High and Low Temps - 2001 - 2010",
fontsize=18)
plt.xlabel('', fontsize=12)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=12)
plt.tick_params(axis='both', which='major', labelsize=16)
#plt.savefig('death_valley_and_sitka_2014_fig.png', dpi=150)
plt.show() | gpl-3.0 |
abhisg/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
mattions/TimeScales | ecellControl/ecellManager.py | 1 | 8194 | # Author Michele Mattioni
# Fri Jan 30 15:57:01 GMT 2009
#import ecell.Session as Session
#from ecell.Session import Session
try:
from mySession import Session
import ecell.ecs
import ecell.config
import ecell.emc
except ImportError, e:
    print "IMPORT ERROR: Ecell not available. Run the model with `bio_on` set to False."
import os
import math
import numpy
from sumatra.external.NeuroTools import parameters
class EcellManager():
    """Control and instantiate the E-Cell simulator, embedding it in a handy Python object"""
def __init__(self, filename=None):
ecell.ecs.setDMSearchPath( os.pathsep.join( ecell.config.dm_path ) )
self.sim = ecell.emc.Simulator()
if ecell.config.version < '3.2.0':
self.ses = Session(self.sim, changeDirectory=False)
else:
self.ses = Session(self.sim)
# Load the model
self.ses.loadModel(filename)
self.molToTrack = ('ca',
'moles_bound_ca_per_moles_cam',
'Rbar',
'PP2Bbar',
'CaMKIIbar',
'PP1abar', # Active PP1/Total PP1
'AMPAR', #
'AMPAR_P',
'D',
'totDp',
'Dpbar'
)
# Tracking the calcium
self.ca = self.ses.createEntityStub( 'Variable:/Spine:ca' )
self.CaMKIIbar = self.ses.createEntityStub( 'Variable:/Spine:CaMKIIbar' )
self.ampar_P = self.ses.createEntityStub('Variable:/Spine:AMPAR_P')
self.ca_in = self.ses.createEntityStub('Process:/Spine:ca_in')
self.ca_leak = self.ses.createEntityStub('Process:/Spine:ca_leak')
self.ca_pump = self.ses.createEntityStub('Process:/Spine:ca_pump')
def createLoggers(self):
"""Create the logger to track the species"""
loggers = {}
#log = ecell.LoggerStub()
for mol in self.molToTrack:
loggers[mol] = self.ses.createLoggerStub( "Variable:/Spine:" + mol
+ ":Value" )
            loggers[mol].create() # This creates the Logger object in the backend
if mol == 'ca':
loggers['ca_conc'] = self.ses.createLoggerStub( "Variable:/Spine:" + mol
+ ":MolarConc" )
                loggers['ca_conc'].create() # This creates the Logger object in the backend
self.loggers = loggers
    def calcWeight(CaMKIIbar, PP2Bbar, alpha, beta, n=3, k=0.5):
        """Calculate the weight of the synapse according to CaMKII and the
        phosphatases PP2B and PP1"""
# CaMKII term
CaMKII_factor = math.pow(CaMKIIbar, n) / (math.pow(k, n) +
math.pow(CaMKIIbar, n))
Phosphatase_factor = math.pow(PP2Bbar, n) / (math.pow(k, n) +
math.pow(PP2Bbar, n))
scaled_CaMKII_factor = alpha * CaMKII_factor
scaled_Phospatese_factor = beta * Phosphatase_factor
weight = 1 + scaled_CaMKII_factor - scaled_Phospatese_factor
s = "Weight: %s CaMKII factor %s, Phosphatase factor %s" %(weight,
scaled_CaMKII_factor,
scaled_Phospatese_factor)
return weight
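    # Added worked example (illustrative numbers, not from the original model):
    # with the defaults n=3 and k=0.5, a fully active kinase pool
    # (CaMKIIbar=1.0) and a silent phosphatase pool (PP2Bbar=0.0) give
    # CaMKII_factor = 1 / (0.5**3 + 1) ~= 0.89, so
    # calcWeight(1.0, 0.0, alpha=0.5, beta=0.5) ~= 1 + 0.5 * 0.89 = 1.44
    # (potentiation); swapping the two pools depresses the weight towards 0.56.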
def calcium_peak(self, k_value, duration):
"""
Mimic the calcium peak
:Parameters
k_value: the rate of calcium to enter
duration: Duration of the spike
"""
basal = self.ca_in['k']
self.ca_in['k'] = k_value
self.ses.run(duration)
self.ca_in['k'] = basal
def calciumTrain(self, spikes=30, interval=0.1):
"""Create a train of calcium with the specified number of spikes and interval
:Parameter
spikes: number of spikes
interval: Interval between spikes
"""
for i in range(spikes):
self.calcium_peak(4.0e8, # Magic number from Lu
0.00001 #Really fast spike to avoid the overlap
)
self.ses.run(interval)
def converToTimeCourses(self):
timeCourses = {}
for key in self.loggers:
timeCourses[key] = self.loggers[key].getData()
self.timeCourses = timeCourses
##############################################
# Testing method
def testCalciumTrain(spikes_number, interval, filename):
    """Run a test simulation with a train of calcium input"""
    print "Test the results of a train of calcium"
ecellManager = EcellManager(filename)
ecellManager.createLoggers()
#ecellManager.ca_in = ecellManager.ses.createEntityStub('Process:/Spine:ca_in')
print "Model loaded, loggers created. Integration start."
ecellManager.ses.run(300)
print "Calcium Train"
ecellManager.calciumTrain(spikes=spikes_number, interval=interval)
ecellManager.ses.run(400)
ecellManager.converToTimeCourses()
print "CalciumTrain Test Concluded\n##################"
return ecellManager
def testChangeCalciumValue(interval, caValue, filename="../biochemical_circuits/biomd183_noCalcium.eml"):
"""Run a test simulation changing the calcium value on the fly"""
    print "Showcase of the possibility to change the level of calcium on the fly"
ecellManager = EcellManager(filename)
ecellManager.createLoggers()
print "Loggers created"
print "Running with the updating interval of : %f" %interval
tstop = 150
while(ecellManager.ses.getCurrentTime() < tstop):
ecellManager.ca['Value'] = caValue
ecellManager.ses.run(interval)
#ecellManager.ses.run(1)
#print ecellManager.ses.getCurrentTime()
    print "Injection of Calcium"
print "Value of Calcium %f" %ecellManager.ca.getProperty('Value')
spikes = 4
for i in range(spikes):
ecellManager.ca['Value'] = 7200
ecellManager.ses.run(0.020)
ecellManager.ca['Value'] = caValue
ecellManager.ses.run(0.010)
tstop = tstop+500
while(ecellManager.ses.getCurrentTime() < tstop):
ecellManager.ca['Value'] = caValue
ecellManager.ses.run(interval)
#ecellManager.ses.run(1)
#print ecellManager.ses.getCurrentTime()
ecellManager.converToTimeCourses()
print "ChangeCalciumValue Test Concluded"
return ecellManager
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print("No parameter file supplied. Abort.")
usage = 'python ecellManager.py ecellControl.param'
print usage
sys.exit()
parameter_file = sys.argv[1]
param = parameters.ParameterSet(parameter_file)
    ## Setting the matplotlib backend
import matplotlib
if param['interactive'] == False:
matplotlib.use('Agg')
print "Switching backend to Agg. Batch execution"
import matplotlib.pyplot as plt
from helpers.plotter import EcellPlotter
import helpers
loader = helpers.Loader()
# ecellManager = testChangeCalciumValue(interval, caValue)
if param['running_type'] == 'train':
ecellManager = testCalciumTrain(param['num_spikes'],
param['delay'],
param['biochemical_filename'])
ecp = EcellPlotter()
if param['interactive'] == False:
dir = loader.create_new_dir(prefix=os.getcwd())
loader.save(ecellManager.timeCourses, dir, "timeCourses")
ecp.plot_timeCourses(ecellManager.timeCourses, save=True, dir=dir)
ecp.plot_weight(ecellManager.timeCourses, dir=dir)
else:
ecp.plot_timeCourses(ecellManager.timeCourses)
ecp.plot_weight(ecellManager.timeCourses)
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/tests/arithmetic/conftest.py | 2 | 5981 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
# ------------------------------------------------------------------
# Helper Functions
def id_func(x):
if isinstance(x, tuple):
assert len(x) == 2
return x[0].__name__ + "-" + str(x[1])
else:
return x.__name__
# ------------------------------------------------------------------
@pytest.fixture(
params=[
("foo", None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
]
)
def names(request):
"""
A 3-tuple of names, the first two for operands, the last for a result.
"""
return request.param
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
"""
Several variants of integer value 1. The zero-dim integer array
behaves like an integer.
This fixture can be used to check that datetimelike indexes handle
addition and subtraction of integers and zero-dimensional arrays
of integers.
Examples
--------
>>> dti = pd.date_range('2016-01-01', periods=2, freq='H')
>>> dti
DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'],
dtype='datetime64[ns]', freq='H')
>>> dti + one
DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'],
dtype='datetime64[ns]', freq='H')
"""
return request.param
zeros = [
box_cls([0] * 5, dtype=dtype)
for box_cls in [pd.Index, np.array]
for dtype in [np.int64, np.uint64, np.float64]
]
zeros.extend(
[box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [pd.Index, np.array]]
)
zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])
zeros.extend([np.array(-0.0, dtype=np.float64)])
zeros.extend([0, 0.0, -0.0])
@pytest.fixture(params=zeros)
def zero(request):
"""
Several types of scalar zeros and length 5 vectors of zeros.
This fixture can be used to check that numeric-dtype indexes handle
division by any zero numeric-dtype.
Uses vector of length 5 for broadcasting with `numeric_idx` fixture,
which creates numeric-dtype vectors also of length 5.
Examples
--------
>>> arr = pd.RangeIndex(5)
>>> arr / zeros
Float64Index([nan, inf, inf, inf, inf], dtype='float64')
"""
return request.param
# ------------------------------------------------------------------
# Vector Fixtures
@pytest.fixture(
params=[
pd.Float64Index(np.arange(5, dtype="float64")),
pd.Int64Index(np.arange(5, dtype="int64")),
pd.UInt64Index(np.arange(5, dtype="uint64")),
pd.RangeIndex(5),
],
ids=lambda x: type(x).__name__,
)
def numeric_idx(request):
"""
Several types of numeric-dtypes Index objects
"""
return request.param
# ------------------------------------------------------------------
# Scalar Fixtures
@pytest.fixture(
params=[
pd.Timedelta("5m4s").to_pytimedelta(),
pd.Timedelta("5m4s"),
pd.Timedelta("5m4s").to_timedelta64(),
],
ids=lambda x: type(x).__name__,
)
def scalar_td(request):
"""
Several variants of Timedelta scalars representing 5 minutes and 4 seconds
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Day(3),
pd.offsets.Hour(72),
pd.Timedelta(days=3).to_pytimedelta(),
pd.Timedelta("72:00:00"),
np.timedelta64(3, "D"),
np.timedelta64(72, "h"),
],
ids=lambda x: type(x).__name__,
)
def three_days(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 3-day timedelta
"""
return request.param
@pytest.fixture(
params=[
pd.offsets.Hour(2),
pd.offsets.Minute(120),
pd.Timedelta(hours=2).to_pytimedelta(),
pd.Timedelta(seconds=2 * 3600),
np.timedelta64(2, "h"),
np.timedelta64(120, "m"),
],
ids=lambda x: type(x).__name__,
)
def two_hours(request):
"""
Several timedelta-like and DateOffset objects that each represent
a 2-hour timedelta
"""
return request.param
_common_mismatch = [
pd.offsets.YearBegin(2),
pd.offsets.MonthBegin(1),
pd.offsets.Minute(),
]
@pytest.fixture(
params=[
pd.Timedelta(minutes=30).to_pytimedelta(),
np.timedelta64(30, "s"),
pd.Timedelta(seconds=30),
]
+ _common_mismatch
)
def not_hourly(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Hourly frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(4, "h"),
pd.Timedelta(hours=23).to_pytimedelta(),
pd.Timedelta("23:00:00"),
]
+ _common_mismatch
)
def not_daily(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Daily frequencies.
"""
return request.param
@pytest.fixture(
params=[
np.timedelta64(365, "D"),
pd.Timedelta(days=365).to_pytimedelta(),
pd.Timedelta(days=365),
]
+ _common_mismatch
)
def mismatched_freq(request):
"""
Several timedelta-like and DateOffset instances that are _not_
compatible with Monthly or Annual frequencies.
"""
return request.param
# ------------------------------------------------------------------
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame], ids=id_func)
def box(request):
"""
Several array-like containers that should have effectively identical
behavior with respect to arithmetic operations.
"""
return request.param
@pytest.fixture(params=[pd.Index, pd.Series, pd.DataFrame, tm.to_array], ids=id_func)
def box_with_array(request):
"""
Fixture to test behavior for Index, Series, DataFrame, and pandas Array
classes
"""
return request.param
# alias so we can use the same fixture for multiple parameters in a test
box_with_array2 = box_with_array
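# Added illustration (hypothetical test, not part of the original suite): pytest
# re-runs a test once per wrapper yielded by `box_with_array`, so a single body
# covers Index, Series, DataFrame and the plain-array wrapper, e.g.
#
#   def test_add_scalar_elementwise(box_with_array):
#       box = box_with_array
#       obj = box(np.arange(3))
#       tm.assert_equal(obj + 1, box(np.arange(1, 4)))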
| bsd-3-clause |
abitofalchemy/ScientificImpactPrediction | procjson_tograph.py | 1 | 8162 | # -*- coding: utf-8 -*-
__author__ = 'Sal Aguinaga'
__license__ = "GPL"
__version__ = "0.1.0"
__email__ = "[email protected]"
import shelve
import numpy as np
import pandas as pd
import networkx as nx
import math
import argparse
import os
import sa_net_metrics as snm
import matplotlib
import itertools
import pprint as pp
matplotlib.use('pdf')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Links:
# [0] http://www.programcreek.com/python/example/5240/numpy.loadtxt
# [1] http://stackoverflow.com/questions/35782251/python-how-to-color-the-nodes-of-a-network-according-to-their-degree/35786355
def draw_citing_users_follower_count():
df = pd.read_csv('Results/twtrs_follower_network.tsv', sep='\t', header=None)
df.columns = ['src', 'followers']
count_followers = lambda row: len(row[1].split(','))
df['fCnt'] = df.apply(count_followers, axis=1)
edglstdf = pd.read_csv('Results/clustered_relevant_users.tsv', sep='\t', header=None)
eldf = edglstdf.apply(lambda row: [x.lstrip('[').rstrip(']') for x in row])
eldf.columns = ['src','trg']
eldf[['src']] = eldf[['src']].apply(pd.to_numeric)
df = pd.merge(eldf,df, on='src')
df[['src','trg','fCnt']].to_csv('Results/procjson_edglst.tsv', sep='\t', header=False, index=False)
g=nx.Graph()
g.add_edges_from(df[['src','trg']].values)
print nx.info(g)
f, axs = plt.subplots(1, 1, figsize=(1.6*6., 1*6.))
# nx.draw_networkx(g, pos=nx.spring_layout(g), ax=axs, with_labels=False, node_size=df[['fCnt']]/float(len(df)), alpha=.5)
pos=nx.spring_layout(g)
# nx.draw_networkx(g, pos=pos, ax=axs, with_labels=False, alpha=.5, node_size=30)
nx.draw_networkx_edges(g, pos=pos, ax=axs, alpha=0.5, width=0.8)
nx.draw_networkx_nodes(g, pos=pos, ax=axs, nodelist=list(df['src'].values), node_color='#7A83AC', node_size=30, alpha=0.5)
nx.draw_networkx_nodes(g, pos=pos, ax=axs, nodelist=list(df['trg'].values), node_color='k', node_size=20, alpha=0.8)
axs.patch.set_facecolor('None')
axs.set_xticks([]) #[None]# grid(True, which='both')
axs.set_yticks([]) #[None]# grid(True, which='both')
plt.savefig('figures/outfig', bbox_inches='tight', pad_inches=0)
return
def convert_follower_network_2edgelist():
dbg = False
df = pd.read_csv('Results/twtrs_follower_network.tsv', sep='\t', header=None)
edges = []
with open('Results/procjson.tsv', 'w') as fout:
for row in df.iterrows():
# get a count of the followers : a naive approach
users_flist = np.array([long(x) for x in row[1][1].lstrip('[').rstrip(']').split(',') if x != ''])
sampsize = int(math.ceil(len(users_flist) * .05))
            # pick 5% of their follower network at random
if len(users_flist) > 1:
idx = np.arange(len(users_flist))
np.random.shuffle(idx)
subsample = users_flist[idx[:sampsize]]
else:
subsample = users_flist
# now get the network for submample
for j, trg in enumerate(subsample):
fout.write('{}\t{}\n'.format(row[1][0], trg)) # ong(trg.strip())))
edges.append((row[1][0], trg))
if dbg: print row[1][0], len(row[1][1].lstrip('[').rstrip(']').split(','))
if dbg: print len(edges)
return edges
def visualize_graph(graph):
if graph is None: return
G = graph
# identify largest connected component
Gcc = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)
print [len(x) for x in Gcc]
Gcc = Gcc[0]
print nx.info(Gcc)
print 'A'
pos = nx.circular_layout(Gcc)
print 'B'
nx.draw_networkx(Gcc, pos, with_labels=False, width=0.125, node_size=20, alpha=0.5)
# nx.draw(Gcc, pos=nx.spring_layout(G))
# print saving to disk
print 'Saving to disk ...'
    plt.savefig('outplot', bbox_inches='tight')
df = pd.DataFrame.from_dict(G.degree().items())
df.columns = ['v', 'k']
gb = df.groupby(['k']).count()
gb['pk'] = gb / float(G.number_of_nodes())
print gb.head(), '<= gb'
# gb['deg'] = gb.index.values
print gb.head()
gb['pk'].to_csv('Results/degree.tsv', index=True, sep="\t", header=True)
# draw graph
# G=nx.random_geometric_graph(G.number_of_nodes(),0.125)
# position is stored as node attribute data for random_geometric_graph
# pos=nx.get_node_attributes(G,'pos')
nx.draw_networkx(G, pos=nx.spring_layout(G), node_size=20, with_labels=False, alpha=0.75, weight=0.5)
# print saving to disk
print 'Saving to disk ...'
    plt.savefig('outplot', bbox_inches='tight')
def main111():
if 1:
G = nx.read_edgelist(infname)
print nx.info(G)
# Graph adj matix
A = nx.to_scipy_sparse_matrix(G)
print type(A)
from scipy import sparse, io
io.mmwrite("Results/test.mtx", A)
exit()
# write to disk clustering coeffs for this graph
snm.get_clust_coeff([G], 'orig', 'mmonth')
# write to disk egienvalue
snm.network_value_distribution([G], [], 'origMmonth')
if 0:
edgelist = np.loadtxt(infname, dtype=str, delimiter='\t')
print edgelist[:4]
idx = np.arange(len(edgelist))
np.random.shuffle(idx)
subsamp_edgelist = edgelist[idx[:100]]
G = nx.Graph()
G.add_edges_from([(long(x), long(y)) for x, y in subsamp_edgelist])
# visualize this graph
# visualize_graph(G)
exit()
G = nx.Graph()
G.add_edges_from([(long(x), long(y)) for x, y in edgelist])
print nx.info(G)
print 'Done'
def draw_basic_network(G,src_list):
slpos = nx.spring_layout(G) # see this for a great grid layout [1]
nx.draw_networkx(G, pos=slpos, node_color='b', nodelist=src_list, with_labels=False,node_size=20, \
edge_color='#7146CC')
nx.draw_networkx_nodes(G, pos=slpos, node_color='r', nodelist=[x for x in G.nodes() if x not in src_list], \
alpha=0.8, with_labels=False,node_size=20)
plt.savefig('figures/basicfig', bbox_inches='tight', pad_inches=0)
def get_parser():
parser = argparse.ArgumentParser(description='procjson clust | Ex: python procjson_clust.py '+
'Results/tweets_cleaned.tsv')
parser.add_argument("--do-fcount", default=False, action="store_true" , help='draw citing-users & follower count')
parser.add_argument("--do-metrics", default=False, action="store_true" , help='compute metrics and write to disk')
parser.add_argument('--version', action='version', version=__version__)
return parser
def main():
parser = get_parser()
args = vars(parser.parse_args())
print args
''' draw a graph of citing-users and their follower count
output: figures/outfig.pdf
'''
if args['do_fcount'] == True:
print '-'*4, 'draw a graph of citing-users and their follower count'
draw_citing_users_follower_count()
exit()
infname = 'Results/procjson.tsv'
infname = "Results/clustered_relevant_users.tsv"
with open(infname) as f:
lines = f.readlines()
edges = []
sourc = []
for j,l in enumerate(lines):
l = l.rstrip('\r\n')
lparts = l.split('\t')
edgesLst= [np.int64(p.lstrip('[').rstrip(']')) for p in lparts]
edges.append(tuple(edgesLst))
sourc.append(edgesLst[0])
# Add the twitter users' follower network
# processes this file: twtrs_follower_network.tsv
plusEdgesLst = convert_follower_network_2edgelist()
fllwrsEdges =[]
for x,y in plusEdgesLst:
x = np.int64(x)
        y = np.int64(y)
fllwrsEdges.append((x,y))
####
#### Builds the basic graph
####
g = nx.Graph()
g.add_edges_from(edges)
print nx.info(g)
print '-'*4,'draw basic network'
draw_basic_network(g,sourc)
g.add_edges_from(plusEdgesLst)
print nx.info(g)
if args ['do_metrics'] == True:
print '-'*4,'compute network metrics and write to disk'
## \ /
## \/ isualization
# deg distrib
snm.get_degree_dist([g],"citeplus", 'orig')
# write to disk clustering coeffs for this graph
snm.get_clust_coeff([g], 'orig', 'citeplus')
# write to disk egienvalue
snm.network_value_distribution([g], [], 'citeplus')
if 0:
L = nx.normalized_laplacian_matrix(g)
e = np.linalg.eigvals(L.A)
print("Largest eigenvalue:", max(e))
print("Smallest eigenvalue:", min(e))
if __name__ == '__main__':
main()
| mit |
rohanp/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
aidiary/deep-learning-theano | utils/visualize_output.py | 1 | 2321 | #coding: utf-8
import numpy as np
import cPickle
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from autoencoder import Autoencoder, load_data
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Mini-batch of data used for testing
x = T.matrix('x')
    # Load the learned parameters from the file
f = open("autoencoder.pkl", "rb")
state = cPickle.load(f)
f.close()
    # Build the autoencoder
    # It must have the same configuration as the one used for training
rng = np.random.RandomState(123)
theano_rng = RandomStreams(rng.randint(2 ** 30))
autoencoder = Autoencoder(numpy_rng=rng,
theano_rng=theano_rng,
input=x,
n_visible=28*28,
n_hidden=500)
    # Set the learned parameters
autoencoder.__setstate__(state)
    # Load the test data
    # Use test data that was not used during training
datasets = load_data('mnist.pkl.gz')
test_set_x = datasets[2][0]
    # Draw the first 100 images
    # test_set_x is a shared variable, so its contents can be read with get_value()
pos = 1
for i in range(100):
plt.subplot(10, 10, pos)
plt.subplots_adjust(wspace=0, hspace=0)
plt.imshow(test_set_x.get_value()[i].reshape(28, 28))
plt.gray()
plt.axis('off')
pos += 1
plt.savefig("original_image.png")
    # Define a function that feeds the first 100 test images and returns the reconstructed images
feedforward = theano.function([],
                                  autoencoder.feedforward(), # symbol that returns the output
givens={ x: test_set_x[0:100] })
    # Compute the output of the output layer for the test_set_x mini-batch
output = feedforward()
print output.shape
    # The outputs are normalized to 0-1, so convert back to 0-255 pixel values
output *= 255.0
output = output.astype(np.int)
    # Draw the images
pos = 1
for i in range(100):
plt.subplot(10, 10, pos)
plt.subplots_adjust(wspace=0, hspace=0)
plt.imshow(output[i].reshape(28, 28))
plt.gray()
plt.axis('off')
pos += 1
plt.savefig("reconstructed_image.png")
| mit |
msbeta/apollo | modules/tools/navigation/planning/obstacle_decider.py | 3 | 7929 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from shapely.geometry import LineString
from shapely.geometry import Point
class ObstacleDecider:
def __init__(self):
self.obstacle_lat_ttc = {}
self.obstacle_lon_ttc = {}
self.obstacle_lat_dist = {}
self.obstacle_lon_dist = {}
self.front_edge_to_center = 3.89
self.back_edge_to_center = 1.043
self.left_edge_to_center = 1.055
self.right_edge_to_center = 1.055
self.LAT_DIST = 0.9
self.mobileye = None
self.path_obstacle_processed = False
self.default_lane_width = 3.3
def update(self, mobileye):
self.mobileye = mobileye
self.path_obstacle_processed = False
def process_path_obstacle(self, fpath):
if self.path_obstacle_processed:
return
path_x, path_y = fpath.get_xy()
self.obstacle_lat_dist = {}
path = []
self.mobileye.process_obstacles()
for i in range(len(path_x)):
path.append((path_x[i], path_y[i]))
line = LineString(path)
for obs_id, obstacle in self.mobileye.obstacles.items():
point = Point(obstacle.x, obstacle.y)
dist = line.distance(point)
if dist < self.LAT_DIST + obstacle.width + self.left_edge_to_center:
proj_len = line.project(point)
if proj_len == 0 or proj_len >= line.length:
continue
p1 = line.interpolate(proj_len)
if (proj_len + 1) > line.length:
p2 = line.interpolate(line.length)
else:
p2 = line.interpolate(proj_len + 1)
d = (point.x - p1.x) * (p2.y - p1.y) - (point.y - p1.y) * (
p2.x - p1.x)
if d > 0:
dist *= -1
self.obstacle_lat_dist[obstacle.obstacle_id] = dist
self.path_obstacle_processed = True
# print self.obstacle_lat_dist
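    # Added note: the cross product `d` computed in process_path_obstacle gives
    # the signed side of each obstacle relative to the local path direction, so
    # obstacles on one side end up with a negative lateral distance and the
    # nudge logic below can tell left from right.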
def get_adv_left_right_nudgable_dist(self, fpath):
left_nudgable = 0
right_nudgable = 0
routing_y = fpath.init_y()
if routing_y <= 0:
left_nudgable = self.default_lane_width / 2.0 \
- abs(routing_y) \
- self.left_edge_to_center
right_nudgable = self.default_lane_width / 2.0 \
+ abs(routing_y) \
- self.right_edge_to_center
else:
left_nudgable = self.default_lane_width / 2.0 \
+ abs(routing_y) \
- self.left_edge_to_center
right_nudgable = self.default_lane_width / 2.0 \
- abs(routing_y) \
- self.right_edge_to_center
return left_nudgable, -1 * right_nudgable
def get_nudge_distance(self, left_nudgable, right_nudgable):
left_nudge = None
right_nudge = None
for obs_id, lat_dist in self.obstacle_lat_dist.items():
if lat_dist >= 0:
actual_dist = abs(lat_dist) \
- self.mobileye.obstacles[obs_id].width / 2.0 \
- self.left_edge_to_center
if self.LAT_DIST > actual_dist > 0.2:
if right_nudge is None:
right_nudge = -1 * (self.LAT_DIST - actual_dist)
elif right_nudge > -1 * (self.LAT_DIST - actual_dist):
right_nudge = -1 * (self.LAT_DIST - actual_dist)
else:
actual_dist = abs(lat_dist) \
- self.mobileye.obstacles[obs_id].width / 2.0 \
- self.left_edge_to_center
if self.LAT_DIST > actual_dist > 0.2:
if left_nudge is None:
left_nudge = self.LAT_DIST - actual_dist
elif left_nudge < self.LAT_DIST - actual_dist:
left_nudge = self.LAT_DIST - actual_dist
if left_nudge is None and right_nudge is None:
return 0
if left_nudge is not None and right_nudge is not None:
return 0
if left_nudge is not None:
if left_nudgable < left_nudge:
return left_nudgable
else:
return left_nudge
if right_nudge is not None:
if abs(right_nudgable) > abs(right_nudge):
return right_nudgable
else:
return right_nudge
if __name__ == "__main__":
import rospy
from std_msgs.msg import String
import matplotlib.pyplot as plt
from modules.localization.proto import localization_pb2
from modules.canbus.proto import chassis_pb2
from ad_vehicle import ADVehicle
import matplotlib.animation as animation
from modules.drivers.proto import mobileye_pb2
from provider_routing import RoutingProvider
from provider_mobileye import MobileyeProvider
from path_decider import PathDecider
def localization_callback(localization_pb):
ad_vehicle.update_localization(localization_pb)
def routing_callback(routing_str):
routing.update(routing_str)
def chassis_callback(chassis_pb):
ad_vehicle.update_chassis(chassis_pb)
def mobileye_callback(mobileye_pb):
global fpath
mobileye.update(mobileye_pb)
mobileye.process_lane_markers()
fpath = path_decider.get_path(mobileye, routing, ad_vehicle,
obs_decider)
obs_decider.update(mobileye)
obs_decider.process_path_obstacle(fpath)
        left_nudgable, right_nudgable = obs_decider.get_adv_left_right_nudgable_dist(fpath)
        print "nudge distance = ", obs_decider.get_nudge_distance(left_nudgable, right_nudgable)
def update(frame):
if not ad_vehicle.is_ready():
return
x = []
y = []
for obs_id, obs in mobileye.obstacles.items():
x.append(obs.x)
y.append(obs.y)
obstacles_points.set_xdata(x)
obstacles_points.set_ydata(y)
if fpath is not None:
px, py = fpath.get_xy()
path_line.set_xdata(px)
path_line.set_ydata(py)
fpath = None
ad_vehicle = ADVehicle()
routing = RoutingProvider()
mobileye = MobileyeProvider()
obs_decider = ObstacleDecider()
path_decider = PathDecider(True, False, False)
rospy.init_node("path_decider_debug", anonymous=True)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
localization_callback)
rospy.Subscriber('/apollo/navigation/routing',
String, routing_callback)
rospy.Subscriber('/apollo/canbus/chassis',
chassis_pb2.Chassis,
chassis_callback)
rospy.Subscriber('/apollo/sensor/mobileye',
mobileye_pb2.Mobileye,
mobileye_callback)
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
obstacles_points, = ax.plot([], [], 'ro')
path_line, = ax.plot([], [], 'b-')
ani = animation.FuncAnimation(fig, update, interval=100)
ax.set_xlim([-2, 128])
ax.set_ylim([-5, 5])
# ax2.axis('equal')
plt.show()
| apache-2.0 |
qbj/git_FuXiaotong | Projects/House_Price/house_price_RF_v2/feature_pick.py | 1 | 2514 | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
# **************************** read test data ****************************************
df_train_data = pd.read_csv('C:\\Users\\fuxt2\\Documents\\code\\python\\house_price\\data\\train.csv')
# **************************************** train & test ****************************************
max_ncol = len(df_train_data.columns)
max_nrow = df_train_data.__len__() + 1
percent_test = 0.3
mid_nrow = round(max_nrow*(1-percent_test))
fl_y_train = df_train_data.iloc[:mid_nrow, max_ncol - 1:max_ncol]
fl_y_test = df_train_data.iloc[mid_nrow:, max_ncol - 1:max_ncol]
df_X = df_train_data.iloc[:,:-1].copy() #all features
df_X_train = df_X[:mid_nrow].copy()
df_X_test = df_X[mid_nrow:].copy()
# **************************************** encode data to numbers ****************************************
for i in df_X_train:
df_X_train[i].replace(np.NaN, 0, inplace=True)
if df_X_train[i].dtypes != np.float64:
df_X_train[i] = df_X_train[i].astype(str) # conver to string
encoder = LabelEncoder()
encoder.fit(df_X_train[i])
df_X_train[i] = encoder.transform(df_X_train[i])
# **************************************** standardizing ****************************************
f_min_max = lambda x: (x-np.min(x)) / (np.max(x) - np.min(x))
df_X_train = df_X_train.apply(f_min_max)
# **************************************** modelling ****************************************
RF_regression_model = RandomForestRegressor(max_depth=16, # bigger, more precise
random_state=0,
n_estimators=160, # bigger, more precise
# min_samples_leaf = i, # bigger, less noise
n_jobs=-1
)
RF_regression_model.fit(X = df_X_train,y = fl_y_train.values.ravel())
# **************************************** feature importance ****************************************
feature_importance = RF_regression_model.feature_importances_
top_n_features = 80
indices = np.argsort(feature_importance)[- top_n_features :]
for i in indices:
print(feature_importance[i])
# print(indices)
df_picked_feature = df_train_data.iloc[:,indices].copy()
# print(df_picked_feature.head())
print(','.join(map(str,indices)))
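# Added follow-up sketch (not in the original script): the indices printed above
# could be reused to retrain on the reduced feature set, e.g.
#
#   X_top = df_X_train.iloc[:, indices]
#   RF_regression_model.fit(X=X_top, y=fl_y_train.values.ravel())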
# [54 46 38 57 59 41 49 19 27 17 61 80] | apache-2.0 |
EdgarRMmex/QPSTools | qdf.py | 1 | 5654 | """
"""
import codecs
import re
import pandas as pd
import numpy as np
class Entry:
    def __init__(self):
self.name = None
self.text = None
self.pandas_type = None
self.qps_type = None
self.start = None
self.size = None
self.max_sel = None
self.responses = None
self.instructions = None
def __str__(self):
return self.name
def __repr__(self):
return self.name
class Response:
def __init__(self):
self.text = None
types_dict = {
"C": "object",
"S": "int",
"M": "int",
"I": "int"
}
class QuestionDefinitions:
"""
"""
def __init__(self):
self.datafile = None
self.quotafile = None
self.qps_version = None
self.entries = []
self.entries_datafile = {}
def parse_qdf(self, path):
with codecs.open(path, "r", "latin1") as file:
qdf_contents = file.read()
qdf_lines = qdf_contents.split("\r\n")
inside_entry = False
inside_response = False
serial_entry = Entry()
serial_entry.pandas_type = "int"
serial_entry.qps_type = "I"
self.entries.append("SERIAL")
self.entries_datafile["SERIAL"] = serial_entry
for line in qdf_lines:
if line.startswith("C S "):
m = re.match(r"^C S (\d+)L(\d+)", line)
self.entries_datafile["SERIAL"].start = eval(str(m.group(1)))
self.entries_datafile["SERIAL"].size = eval(str(m.group(2)))
elif line == "Q B":
inside_entry = True
entry = Entry()
continue
elif line == "Q E":
inside_entry = False
inside_response = False
if entry.pandas_type:
self.entries_datafile[entry.name] = entry
self.entries.append(entry.name)
elif line.startswith("X I ") and inside_entry:
m = re.match(r"^X I [0-9]{1,7} (.*)$", line)
if m:
entry.instructions = m.group(1)
else:
entry.instructions = None
elif line.startswith("Q ") and inside_entry:
m = re.match(r"^Q \[(.*?)\] (.*)$", line)
entry.name = m.group(1)
entry.text = m.group(2)
elif line.startswith("T ") and inside_entry and not inside_response:
m = re.match(r"^T (.*?)$", line)
entry.qps_type = m.group(1)
try:
entry.pandas_type = types_dict[m.group(1)]
except KeyError:
entry.pandas_type = None
if m.group(1) == "T":
entry.start = None
entry.size = None
entry.max_sel = None
elif line.startswith("L ") and inside_entry:
m = re.match(r"^L (\d+)L(\d+)R*(\d+)*$", line)
entry.start = eval(str(m.group(1)))
entry.size = eval(str(m.group(2)))
entry.max_sel = eval(str(m.group(3)))
inside_response = True
entry.responses = []
elif line.startswith("R ") and inside_response:
m = re.match(r"^R (.*?)$", line)
entry.responses.append(m.group(1))
def read_data(self, datafile):
if len(self.entries) == 0:
raise ValueError("There are no entries in the Question definitions")
entry_names = []
entry_specs = []
entry_types = {}
for entry in self.entries:
entry_object = self.entries_datafile[entry]
if entry_object.qps_type == "M":
if entry_object.max_sel:
max_digits = np.floor(np.log10(entry_object.max_sel)) + 1
fmt = "%s${0:0>%dd}" % (entry, max_digits)
spec1, spec2 = entry_object.start, entry_object.start + entry_object.size
for i in range(entry_object.max_sel):
column_name = fmt.format(i + 1)
entry_names.append(column_name)
entry_specs.append((spec1 - 1, spec2 - 1))
entry_types[column_name] = entry_object.pandas_type
spec1, spec2 = spec1 + entry_object.size, spec2 + entry_object.size
else:
max_digits = np.floor(np.log10(len(entry_object.responses))) + 1
fmt = "%s${0:0>%dd}" % (entry, max_digits)
spec1, spec2 = entry_object.start, entry_object.start + 1
for i, response in enumerate(entry_object.responses):
column_name = fmt.format(i + 1)
entry_names.append(column_name)
entry_specs.append((spec1 - 1, spec2 - 1))
entry_types[column_name] = entry_object.pandas_type
spec1, spec2 = spec1 + 1, spec2 + 1
else:
spec1, spec2 = entry_object.start, entry_object.start + entry_object.size
entry_names.append(entry)
entry_specs.append((spec1 - 1, spec2 - 1))
entry_types[entry] = entry_object.pandas_type
return pd.read_fwf(datafile, colspecs=entry_specs, names=entry_names)
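# Added usage sketch (the file names below are placeholders, not from the source):
#
#   qdefs = QuestionDefinitions()
#   qdefs.parse_qdf("survey.qdf")       # read entry names, types and positions
#   df = qdefs.read_data("survey.dat")  # parse the fixed-width data file
#   print(df.head())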
def main():
pass
if __name__ == "__main__":
main()
| gpl-3.0 |
gbaier/despeckCL | examples/polsar_test.py | 1 | 3037 | """ example the downloads some fully polarimetric data from ESA's
PolSAR test data set and filters it with NL-SAR """
import os
import urllib.request
import zipfile
import gdal
import matplotlib.pyplot as plt
import numpy as np
# Add build directory to the python search paths for finding the module
# without installing it
import sys
sys.path.insert(0, '../build/swig/python')
import despeckcl
###############################
# #
# Get some test data from ESA #
# #
###############################
URL = 'https://earth.esa.int/documents/653194/658149/'
FILENAME = 'AIRSAR_Flevoland'
DATANAME = 'FLEVOL.STK'
# extracts data to use for training
TRAIN_SUB = np.s_[:, :, 200:230, 200:230]
# extracts data to be filtered and plotted
AREA_SUB = np.s_[:, :, :400, :600]
def stk_reader(stk_filename):
""" see http://gdal.org/frmt_airsar.html for description """
data = gdal.Open(stk_filename)
data = data.ReadAsArray()
mat = np.empty((3, 3, *data.shape[1:]), dtype=np.complex64)
mat[0, 0] = data[0]
mat[0, 1] = data[1]
mat[1, 0] = data[1].conj()
mat[0, 2] = data[2]
mat[2, 0] = data[2].conj()
mat[1, 1] = data[3]
mat[1, 2] = data[4]
mat[2, 1] = data[4].conj()
mat[2, 2] = data[5]
return mat
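# Added note: the STK stack stores only the six independent entries of the 3x3
# Hermitian covariance matrix, so the reader above mirrors them with complex
# conjugates. Assuming that layout, a quick sanity check could be:
#
#   mat = stk_reader(DATANAME)
#   assert np.allclose(mat, np.conj(np.swapaxes(mat, 0, 1)))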
try:
COVMAT = stk_reader(DATANAME)
except FileNotFoundError:
urllib.request.urlretrieve(URL + FILENAME, FILENAME + '.zip')
with zipfile.ZipFile(FILENAME + '.zip') as zf:
zf.extract(DATANAME)
COVMAT = stk_reader(DATANAME)
#############
# #
# Filtering #
# #
#############
PARAMS = {
'search_window_size': 21,
'patch_sizes': [3, 5, 7],
'scale_sizes': [1, 3],
'h': 3.0,
'c': 49,
'enabled_log_levels': ['warning', 'fatal', 'error'], #, 'debug', 'info']
}
# store and load NL-SAR statistics
STATS_FILENAME = 'polsar_stats.txt'
print('getting similarity statistics')
if os.path.isfile(STATS_FILENAME):
print('found saved statistics... restoring')
NLSAR_STATS = despeckcl.load_nlsar_stats_collection(STATS_FILENAME)
else:
print('computing statistics')
NLSAR_STATS = despeckcl.nlsar_train(
COVMAT[TRAIN_SUB], PARAMS['patch_sizes'], PARAMS['scale_sizes'])
print('storing statistics')
despeckcl.store_nlsar_stats_collection(NLSAR_STATS, STATS_FILENAME)
print('filtering')
COVMAT_FILT = despeckcl.nlsar(
COVMAT[AREA_SUB], nlsar_stats=NLSAR_STATS, **PARAMS)
############
# #
# Plotting #
# #
############
fig = plt.figure()
ax = None
for nr, (data, title) in enumerate(
zip([COVMAT[AREA_SUB], COVMAT_FILT], ['input', 'filtered']), 1):
# extract diagonal elements
diag = np.abs(np.diagonal(data)) + 0.000001
# conversion to dB and normalization
rgb_comp = 10 * np.log10(diag)
rgb_comp_norm = rgb_comp - rgb_comp.min()
rgb_comp_norm /= rgb_comp_norm.max()
ax = fig.add_subplot(1, 2, nr, sharex=ax, sharey=ax)
ax.imshow(rgb_comp_norm)
ax.set_title(title)
plt.show()
| gpl-3.0 |
shanwai1234/Maize_Phenotype_Map | hyperspectral_PCA_visualization.py | 1 | 7348 | import numpy as np
import cv2
from matplotlib import pyplot as plt
import os
import sys
from scipy import linalg as LA
from matplotlib import cm
##############################Hyperspectral Image PCA Visualization#####################################################################################################################
##############################Notice: since all pixels are analyzed at once, the more images to be analyzed, the more (exponentially growing) time it will cost !!!#############################################
# Copy any PlantID folder you are interested in to 'test_HYP'
mfold = sys.argv[1]
# Create function PCA2 to generate the first three PC coefficients for all analyzed image data
def PCA2(data, dims_rescaled_data=3):
"""
    returns: data projected onto the first `dims_rescaled_data` principal components,
    plus the eigenvalues and eigenvectors of the covariance matrix
    pass in: data as 2D NumPy array
"""
m, n = data.shape
# mean center the data
data -= data.mean(axis=0)
# calculate the covariance matrix
R = np.cov(data, rowvar=False)
# calculate eigenvectors & eigenvalues of the covariance matrix
# use 'eigh' rather than 'eig' since R is symmetric,
# the performance gain is substantial
evals, evecs = LA.eigh(R)
# sort eigenvalue in decreasing order
idx = np.argsort(evals)[::-1]
evecs = evecs[:,idx]
# sort eigenvectors according to same index
evals = evals[idx]
# select the first n eigenvectors (n is desired dimension
# of rescaled data array, or dims_rescaled_data)
evecs = evecs[:, :dims_rescaled_data]
# carry out the transformation on the data using eigenvectors
# and return the re-scaled data, eigenvalues, and eigenvectors
return np.dot(evecs.T, data.T).T, evals, evecs
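# Added sketch (not part of the original pipeline): PCA2 mean-centers the
# pixel-by-band matrix, eigendecomposes its covariance and projects onto the
# leading eigenvectors. A quick self-check on synthetic data, kept inside a
# helper so the script's behaviour is unchanged, could look like this:
def _pca2_shape_check(n_pixels=100, n_bands=10, n_components=3):
    fake = np.random.rand(n_pixels, n_bands)
    scores, evals, evecs = PCA2(fake, dims_rescaled_data=n_components)
    # one row of scores per pixel, one column per retained component
    assert scores.shape == (n_pixels, n_components)
    # one eigenvector column per retained component, one row per band
    assert evecs.shape == (n_bands, n_components)
    return scores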
# Separating all analyzed pixels using the first two PCs
def plot_pca(data):
clr1 = '#2026B2'
fig = plt.figure()
ax1 = fig.add_subplot(111)
data_resc, data_orig,a = PCA2(data)
ax1.plot(data_resc[:, 0], data_resc[:, 1], '.', mfc=clr1, mec=clr1)
plt.show()
return data_resc
# Using NDVI to segment all plant area by defining threshold greater than 0.25
def rmstem(p705,p750,upper_bound,bottom_bound,left_bound,right_bound):
mypic = []
control = []
myl = np.shape(p705)[0]
myw = np.shape(p705)[1]
y1 = int(upper_bound)
y2 = int(bottom_bound)
x1 = int(left_bound)
x2 = int(right_bound)
for i in range(myl):
if i < y1 or i > y2:continue
for j in range(myw):
if j < x1 or j > x2:continue
ndvi = (p750[i,j]-p705[i,j])/(p750[i,j]+p705[i,j])
if ndvi > 0.25:
n = []
n.append(i)
n.append(j)
mypic.append(n)
else:
m = []
m.append(i)
m.append(j)
control.append(m)
return mypic,control
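# Added worked example of the NDVI rule above (illustrative numbers only):
# a leaf pixel reflecting strongly at 750 nm and weakly at 705 nm, say
# p750 = 180 and p705 = 90, gives NDVI = (180 - 90) / (180 + 90) = 0.33 > 0.25
# and is kept as plant, while a background pixel with p750 = 100 and p705 = 95
# gives NDVI ~= 0.026 and falls into the non-plant control set.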
# Calculating the median intensity of pixels in the non-plant area
def NP(target, pic2):
final = []
for k in target:
i = k[0]
j = k[1]
final.append(pic2[i,j])
fnum = np.median(final)
return fnum
# Storing the reflectance of each pixel and their corresponding positions in the original image
def PCA(target, k, pic2, n):
final = {}
for a in target:
i = a[0]
j = a[1]
myname = "{0}-{1}-{2}".format(i,j,n)
final[myname] = pic2[i,j]/k
return final
# sh is the reference file showing which file corresponds to which wavelength
sh = open('wavelength_foldid.txt','r')
sh.readline()
kdict = {}
# build a library to include file~wavelength information
for line in sh:
new = line.strip().split('\t')
kdict[new[-1]] = new[0]
sh.close()
# Because there is usually no germination in the first three days, we skip them to speed up the run
first3 = set([])
for i in range(1,4):
first3.add('Day_'+str(i).zfill(3))
ll = []
whole = os.listdir(mfold)
mdict = {}
tlist = []
# The date you want to visualize, e.g. Day_028
date = sys.argv[2]
for j1 in whole:
tlist.append(j1)
for i1 in os.listdir('{0}/{1}/HYP SV 90/'.format(mfold,j1)):
if i1 != date:continue
subset = os.listdir('{0}/{1}/HYP SV 90/{2}'.format(mfold,j1,i1))
# in every folder, the images of 35_0_0.png and 45_0_0.png should be used firstly in order to subtract the plant area
if True:
m705 = cv2.imread('{0}/{1}/HYP SV 90/{2}/35_0_0.png'.format(mfold,j1,i1))
m750 = cv2.imread('{0}/{1}/HYP SV 90/{2}/45_0_0.png'.format(mfold,j1,i1))
# converting plant images from RGB to GRAY channel
tm705 = cv2.cvtColor(m705,cv2.COLOR_BGR2GRAY)
tm750 = cv2.cvtColor(m750,cv2.COLOR_BGR2GRAY)
tm705 = tm705.astype(np.float)
tm750 = tm750.astype(np.float)
# defining the interested area that we are going to analyze the plant
rmg,back = rmstem(tm705,tm750,45,445,30,273)
for i in subset:
# first two images are not useful and just skip them
if i == '0_0_0.png':continue
if i == '1_0_0.png':continue
# info.txt is not an image file
if i == 'info.txt':continue
name = i.replace('_0_0.png','')
t = cv2.imread('{0}/{1}/HYP SV 90/{2}/{3}'.format(mfold,j1,i1,i))
t = t.astype(np.float)
t1 = t[:,:,0]
# multiply each files in the folder with the binarized image. For each pixel, dividing 255 to make each pixel in 0~1
cint = NP(back,t1)
total = PCA(rmg,cint,t1,j1)
if name not in mdict:
mdict[name] = {}
mdict[name].update(total)
wavelengths = list(mdict)
pixels = list(mdict[wavelengths[0]])
else:
print j1
for p in pixels:
ll.append([])
for w in wavelengths:
ll[-1].append(mdict[w][p])
ll_array = np.array(ll)
data_resc = plot_pca(ll_array)
myxvals = {}
myyvals = {}
mycvals = {}
myplant = set([])
for x in range(3):
mytitle = "PC {0}".format(x+1)
for name,val in zip(pixels,data_resc[:,x]):
l = map(int,name.split('-')[:2])
myplant.add(name.split('-')[2])
myid = 'PC'+str(x)+'-'+name.split('-')[2]
if myid not in myxvals:
myxvals[myid] = []
myyvals[myid] = []
mycvals[myid] = []
myyvals[myid].append(l[0]*(-1))
myxvals[myid].append(l[1])
mycvals[myid].append(val)
n = 0
myxtick = []
myxname = []
ncvals = {}
for i in myplant:
myxname.append(i)
pc0 = 'PC0'+'-'+i
pc1 = 'PC1'+'-'+i
pc2 = 'PC2'+'-'+i
if i not in ncvals:
ncvals[i] = {}
ncvals[i][pc0] = []
ncvals[i][pc1] = []
ncvals[i][pc2] = []
# b is real value of pc value, a is the position of pc value
for a,b in enumerate(mycvals[pc0]):
name = str(myyvals[pc0][a])+'-'+str(myxvals[pc0][a])
if name not in ncvals[i]:
ncvals[i][name] = []
# normalize PCA components for each plant of each genotype by the formula: normalized_value = (value-min_value)/(max_value-min_value)
ncvals[i][name].append((mycvals[pc0][a]-min(mycvals[pc0]))/(max(mycvals[pc0])-min(mycvals[pc0])))
ncvals[i][name].append((mycvals[pc1][a]-min(mycvals[pc1]))/(max(mycvals[pc1])-min(mycvals[pc1])))
ncvals[i][name].append((mycvals[pc2][a]-min(mycvals[pc2]))/(max(mycvals[pc2])-min(mycvals[pc2])))
n = 0
plt.show()
fig = plt.figure()
ax = fig.add_subplot('111')
num = 0
for i in myplant:
xvals = []
yvals = []
cvals = []
pc0 = 'PC0'+'-'+i
nx = max(myxvals[pc0])-min(myxvals[pc0])
ny = max(myyvals[pc0])-min(myyvals[pc0])
for ii in range(nx):
x = ii + min(myxvals[pc0])
for jj in range(ny):
y = jj + min(myyvals[pc0])
pos = str(y)+'-'+str(x)
if pos in ncvals[i]:
clist = ncvals[i][pos]
xvals.append(ii+num*250)
yvals.append(jj)
cvals.append((clist[0],clist[1],clist[2]))
myxtick.append(np.median(xvals))
myxname.append(i)
num += 1
ax.scatter(xvals,yvals,color=cvals)
ax.set_xticks(myxtick)
ax.set_xticklabels(myxname)
ax.set_yticklabels([])
plt.show()
| bsd-3-clause |
antgonza/qiita | qiita_pet/handlers/rest/study_samples.py | 1 | 4233 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.escape import json_encode, json_decode
import pandas as pd
from qiita_db.handlers.oauth2 import authenticate_oauth
from .rest_handler import RESTHandler
class StudySamplesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
samples = []
else:
samples = list(study.sample_template.keys())
self.write(json_encode(samples))
self.finish()
@authenticate_oauth
def patch(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
self.fail('No sample information found', 404)
return
else:
sample_info = study.sample_template.to_dataframe()
data = pd.DataFrame.from_dict(json_decode(self.request.body),
orient='index')
if len(data.index) == 0:
self.fail('No samples provided', 400)
return
categories = set(study.sample_template.categories)
if set(data.columns) != categories:
if set(data.columns).issubset(categories):
self.fail('Not all sample information categories provided',
400)
else:
unknown = set(data.columns) - categories
self.fail("Some categories do not exist in the sample "
"information", 400,
categories_not_found=sorted(unknown))
return
existing_samples = set(sample_info.index)
overlapping_ids = set(data.index).intersection(existing_samples)
new_ids = set(data.index) - existing_samples
status = 500
# warnings generated are not currently caught
# see https://github.com/biocore/qiita/issues/2096
if overlapping_ids:
to_update = data.loc[overlapping_ids]
study.sample_template.update(to_update)
status = 200
if new_ids:
to_extend = data.loc[new_ids]
study.sample_template.extend(to_extend)
status = 201
self.set_status(status)
self.finish()
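# Hedged illustration (not part of the original handlers): the PATCH body above is
# expected to be a JSON object keyed by sample id, where every sample lists *all*
# sample-information categories (see the category check in patch()). The ids,
# categories and values below are invented for the example.
_EXAMPLE_PATCH_BODY = {
    "1.SKB1.640202": {"season": "winter", "depth": "10"},
    "1.SKB2.640194": {"season": "summer", "depth": "2"},
}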
class StudySamplesCategoriesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id, categories):
if not categories:
self.fail('No categories specified', 405)
return
study = self.safe_get_study(study_id)
if study is None:
return
categories = categories.split(',')
if study.sample_template is None:
self.fail('Study does not have sample information', 404)
return
available_categories = set(study.sample_template.categories)
not_found = set(categories) - available_categories
if not_found:
self.fail('Category not found', 404,
categories_not_found=sorted(not_found))
return
blob = {'header': categories,
'samples': {}}
df = study.sample_template.to_dataframe()
for idx, row in df[categories].iterrows():
blob['samples'][idx] = list(row)
self.write(json_encode(blob))
self.finish()
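# Hedged illustration (not part of the original handlers): for requested categories
# "season,depth", the handler above writes JSON shaped like the following (sample ids
# and values are invented for the example):
# {
#     "header": ["season", "depth"],
#     "samples": {"1.SKB1.640202": ["winter", "10"],
#                 "1.SKB2.640194": ["summer", "2"]}
# }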
class StudySamplesInfoHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
st = study.sample_template
if st is None:
info = {'number-of-samples': 0,
'categories': []}
else:
info = {'number-of-samples': len(st),
'categories': st.categories}
self.write(json_encode(info))
self.finish()
| bsd-3-clause |
hlin117/statsmodels | statsmodels/iolib/summary.py | 22 | 33071 | from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
        raise NotImplementedError
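# Hedged illustration (not part of the original module): a small helper showing how
# `forg` switches between fixed-point and general formatting around the 1e-4 / 1e4
# thresholds. The sample values are arbitrary.
def _forg_examples():
    return [(value, forg(value), forg(value, prec=4))
            for value in (0.00005, 0.123456, 1234.5678, 123456.0)]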
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
-----------
yname : string
optional, Default is `Y`
xname : list of strings
        optional, Default is `X.#` for # in 0..p-1, where p is the number of regressors
    Confidence interval : (0,1) not implemented
title : string
        optional, Default is 'Generalized linear model'
returns : string
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
        Prints the summarized results
Option :
returns='text'
        Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
        Not implemented yet
Option :
returns='HTML'
        Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).results
>>> print ols_results.summary()
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                   'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
        'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval']  # check z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
    # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
pass
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
        title = results.model.__class__.__name__ + ' Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
gen_left = [('Dep. Variable:', None),
('Model type:', None),
('Date:', None),
('No. Observations:', None),
('Df model:', None),
('Df resid:', None)]
try:
llf = results.llf
gen_left.append(('Log-Likelihood', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
gen_table_right = [] #because .extend_right seems works with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
[forg(conf_int[i,0]) for i in exog_idx],
[forg(conf_int[i,1]) for i in exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
title : None or string
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
        significance level for the confidence intervals (default 0.05, i.e. 95% intervals)
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
        only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
        If true, then all headers are kept. If false, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
raise ValueError('available output formats are text, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
Contains the list of SimpleTable instances, horizontally concatenated tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
etext : string
string with lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if not self.extra_txt is None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
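# Hedged usage sketch (not part of the original module): one way the Summary container
# above might be driven for a fitted results instance `res` (e.g. an OLS fit, as in the
# __main__ block below). The gleft/gright labels are taken from `default_items` in
# summary_top; everything else here is illustrative only.
def _summary_usage_sketch(res):
    smry = Summary()
    smry.add_table_2cols(res,
                         gleft=[('Dep. Variable:', None), ('Model:', None)],
                         gright=[('No. Observations:', None), ('Df Model:', None)])
    smry.add_table_params(res, use_t=True)
    return smry.as_text()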
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
| bsd-3-clause |
brianmckenna/sci-wms | wms/wms_handler.py | 1 | 7363 | # -*- coding: utf-8 -*-
from datetime import datetime, date
from dateutil.parser import parse
from dateutil.tz import tzutc
import pyproj
from wms.utils import DotDict, split
from wms import logger
def get_bbox(request):
"""
    Return the [lonmin, latmin, lonmax, latmax] - [lower (x,y), upper (x,y)]
Units will be specified by projection.
"""
elements = [ float(el) for el in request.GET["bbox"].split(",") ]
return DotDict(minx=elements[0], miny=elements[1], maxx=elements[2], maxy=elements[3])
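# Hedged illustration (not part of the original module): get_bbox only needs an object
# whose GET attribute behaves like a dict, so a minimal stand-in is enough to show the
# parsing. The request stub and coordinates below are invented for the example.
class _FakeBBoxRequest(object):
    def __init__(self, params):
        self.GET = params


def _get_bbox_example():
    bbox = get_bbox(_FakeBBoxRequest({"bbox": "-75.0,35.0,-70.0,40.0"}))
    return bbox.minx, bbox.miny, bbox.maxx, bbox.maxy  # (-75.0, 35.0, -70.0, 40.0)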
def get_wgs84_bbox(request):
"""
    Return the [lonmin, latmin, lonmax, latmax] - [lower (x,y), upper (x,y)]
in WGS84
"""
EPSG4326 = pyproj.Proj(init='EPSG:4326')
crs = get_projection(request)
bbox = get_bbox(request)
wgs84_minx, wgs84_miny = pyproj.transform(crs, EPSG4326, bbox.minx, bbox.miny)
wgs84_maxx, wgs84_maxy = pyproj.transform(crs, EPSG4326, bbox.maxx, bbox.maxy)
return DotDict(minx=wgs84_minx, miny=wgs84_miny, maxx=wgs84_maxx, maxy=wgs84_maxy, bbox=(wgs84_minx, wgs84_miny, wgs84_maxx, wgs84_maxy))
def get_format(request):
"""
Return the FORMAT for GetLegendGraphic requests
"""
try:
return 'image/png' # request.GET['format'].lower()
except KeyError:
return 'image/png'
def get_show_label(request):
"""
Return the SHOWLABEL for GetLegendGraphic requests
"""
try:
if 'colorbaronly' in request.GET and request.GET['colorbaronly'].lower() == 'true':
return False
else:
return request.GET['showlabel'].lower() == 'true'
except KeyError:
return True
def get_units(request, units):
"""
Return the UNITS for GetLegendGraphic requests
"""
try:
return request.GET['unitlabel'].lower()
except KeyError:
return units
def get_logscale(request, default_logscale):
"""
Return the LOGSCALE for GetLegendGraphic requests
"""
try:
return request.GET['logscale'].lower() == 'true'
except KeyError:
return default_logscale
def get_horizontal(request):
"""
Return the horizontal for GetLegendGraphic requests
"""
try:
return request.GET['horizontal'].lower() == 'true'
except KeyError:
return False
def get_show_values(request):
"""
Return the SHOWVALUES for GetLegendGraphic requests
"""
try:
if 'colorbaronly' in request.GET and request.GET['colorbaronly'].lower() == 'true':
return False
else:
return request.GET['showvalues'].lower() == 'true'
except KeyError:
return True
def get_num_contours(request, default=None):
"""
Return the NUMCONTOURS for GetLegendGraphic requests
"""
default = default or 8
try:
return int(float(request.GET['numcontours'].lower()))
except (KeyError, ValueError):
return default
def get_info_format(request):
"""
Return the INFO_FORMAT for GetFeatureInfo requests
"""
try:
return request.GET['info_format'].lower()
except KeyError:
return None
def get_projection(request):
"""
Return the projection string passed into the request.
Can be specified by \"SRS\" or \"CRS\" key (string).
If \"SRS\" or \"CRS\" is not available, default to mercator.
"""
projstr = request.GET.get("srs")
if not projstr:
projstr = request.GET.get("crs")
if not projstr:
projstr = "EPSG:3857"
logger.debug("SRS or CRS no available in requst, defaulting to EPSG:3857 (mercator)")
return pyproj.Proj(init=projstr)
def get_xy(request):
"""
Returns list of floats
"""
try:
x = float(request.GET.get('x'))
except ValueError:
x = None
try:
y = float(request.GET.get('y'))
except ValueError:
y = None
return DotDict(x=x, y=y)
def get_elevation(request):
"""
Return the elevation
"""
try:
elev = request.GET["elevation"]
return float(elev)
except (TypeError, KeyError):
return 0
def get_time(request):
"""
Return the min and max times
"""
time = request.GET.get('time')
if time is None:
return datetime.utcnow()
else:
dt = parse(time)
if dt.tzinfo is not None:
utc_dt = dt.astimezone(tzutc()) # convert UTC if tzinfo is available
utc_tz_naive = utc_dt.replace(tzinfo=None)
else:
utc_tz_naive = dt
return utc_tz_naive
def get_times(request):
"""
Return the min and max times
"""
time = request.GET.get('time')
if not time:
time = date.today().isoformat() + "T00:00:00"
times = sorted([ parse(t) for t in time.split("/") ])
return DotDict(min=times[0], max=times[-1])
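# Illustration (not part of the original module): a WMS TIME parameter of the form
# "2015-01-01T00:00:00/2015-01-07T00:00:00" yields DotDict(min=datetime(2015, 1, 1),
# max=datetime(2015, 1, 7)); a single timestamp yields min == max; a missing TIME falls
# back to midnight of the current date.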
def get_colormap(request, parameter=None, default=None):
parameter = parameter or 'styles'
default = default or 'cubehelix'
try:
from matplotlib.pyplot import colormaps
requested_cm = split(request.GET.get(parameter).split(',')[0], '_', maxsplit=1)[1]
assert requested_cm
return next(x for x in colormaps() if x.lower() == requested_cm.lower())
except (AssertionError, IndexError, AttributeError, TypeError, StopIteration):
return default
def get_imagetype(request, parameter=None, default=None):
parameter = parameter or 'styles'
default = default or 'filledcontours'
try:
z = split(request.GET.get(parameter).split(',')[0], '_', maxsplit=1)[0].lower()
assert z
return z
except (AssertionError, IndexError, AttributeError, TypeError):
return default
def get_vectorscale(request):
try:
vectorscale = float(request.GET.get('vectorscale'))
except (AttributeError, TypeError):
vectorscale = 1
return vectorscale
def get_vectorstep(request):
try:
vectorstep = int(request.GET.get('vectorstep'))
except TypeError:
vectorstep = 1 # equivalent to getting all the data
return vectorstep
def get_colorscalerange(request, default_min, default_max):
try:
climits = sorted([ float(x) for x in request.GET.get('colorscalerange').split(',') ])
return DotDict(min=climits[0], max=climits[-1])
except (AttributeError, TypeError):
return DotDict(min=default_min, max=default_max)
def get_dimensions(request, default_width=None, default_height=None):
"""
Return width and height of requested view.
RETURNS width, height request should be in pixel units.
"""
try:
width = float(request.GET.get("width"))
height = float(request.GET.get("height"))
return DotDict(width=width, height=height)
    except (TypeError, ValueError):
return DotDict(width=default_width, height=default_height)
def get_gfi_positions(xy, bbox, crs, dims):
""" Returns the latitude and longitude the GFI should be performed at"""
EPSG4326 = pyproj.Proj(init='EPSG:4326')
lon, lat = pyproj.transform(crs, EPSG4326, bbox.minx+((bbox.maxx-bbox.minx)*(xy.x/dims.width)), bbox.maxy-((bbox.maxy-bbox.miny)*(xy.y/dims.height)))
return DotDict(latitude=lat, longitude=lon)
def get_item(request):
"""
Returns the GetMetadata 'item' function
"""
try:
return request.GET["item"].lower()
except KeyError:
return None
| gpl-3.0 |
Barmaley-exe/scikit-learn | sklearn/ensemble/partial_dependence.py | 36 | 14909 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
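# Hedged illustration (not part of the original module): with only three unique values
# per column and grid_resolution=5, each axis falls back to the unique values, so the
# cartesian grid has 3 * 3 = 9 rows. The toy array is invented for the example.
def _grid_from_X_demo():
    X_toy = np.array([[0., 10.], [1., 20.], [2., 30.]])
    grid, axes = _grid_from_X(X_toy, grid_resolution=5)
    assert grid.shape == (9, 2) and len(axes) == 2
    return grid, axes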
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentiles used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentiles used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
winklerand/pandas | pandas/tests/plotting/test_deprecated.py | 1 | 1535 | # coding: utf-8
import string
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pytest
from numpy.random import randn
import pandas.tools.plotting as plotting
from pandas.tests.plotting.common import TestPlotBase
"""
Test cases for plot functions imported from deprecated
pandas.tools.plotting
"""
@td.skip_if_no_mpl
class TestDeprecatedNameSpace(TestPlotBase):
@pytest.mark.slow
def test_scatter_plot_legacy(self):
tm._skip_if_no_scipy()
df = pd.DataFrame(randn(100, 2))
with tm.assert_produces_warning(FutureWarning):
plotting.scatter_matrix(df)
with tm.assert_produces_warning(FutureWarning):
pd.scatter_matrix(df)
@pytest.mark.slow
def test_boxplot_deprecated(self):
df = pd.DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
with tm.assert_produces_warning(FutureWarning):
plotting.boxplot(df, column=['one', 'two'],
by='indic')
@pytest.mark.slow
def test_radviz_deprecated(self):
df = self.iris
with tm.assert_produces_warning(FutureWarning):
plotting.radviz(frame=df, class_column='Name')
@pytest.mark.slow
def test_plot_params(self):
with tm.assert_produces_warning(FutureWarning):
pd.plot_params['xaxis.compat'] = True
| bsd-3-clause |
EvenStrangest/tensorflow | tensorflow/examples/skflow/iris_custom_decay_dnn.py | 3 | 1749 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
# setup exponential decay function
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
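# Illustration (not part of the original example): with the values above and the default
# (non-staircase) schedule, the decayed rate is 0.1 * 0.001 ** (global_step / 100),
# i.e. 0.1 at step 0, roughly 0.0032 at step 50, and 0.0001 at step 100.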
# use customized decay function in learning_rate
optimizer = tf.train.AdagradOptimizer(learning_rate=exp_decay)
classifier = tf.contrib.learn.DNNClassifier(hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer)
classifier.fit(X_train, y_train, steps=800)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
| apache-2.0 |
aterrel/blaze | blaze/api/tests/test_into.py | 1 | 3436 | import unittest
from dynd import nd
import numpy as np
from datashape import dshape
from blaze.api.into import into, discover
import blaze
def skip(test_foo):
return
def skip_if_not(x):
def maybe_a_test_function(test_foo):
if not x:
return
else:
return test_foo
return maybe_a_test_function
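# Illustration (not part of the original tests): skip_if_not(condition) builds a
# decorator that keeps the test only when `condition` is truthy, e.g.
#
#     @skip_if_not(DataFrame)
#     def test_something():
#         ...
#
# otherwise the decorated function is replaced with None so the test runner ignores it.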
class Test_into(unittest.TestCase):
def test_containers(self):
self.assertEqual(into([], (1, 2, 3)),
[1, 2, 3])
self.assertEqual(into((), (1, 2, 3)),
(1, 2, 3))
self.assertEqual(into({}, [(1, 2), (3, 4)]),
{1: 2, 3: 4})
self.assertEqual(into((), {1: 2, 3: 4}),
((1, 2), (3, 4)))
self.assertEqual(into((), {'cat': 2, 'dog': 4}),
(('cat', 2), ('dog', 4)))
def test_dynd(self):
self.assertEqual(nd.as_py(into(nd.array(), (1, 2, 3))),
nd.as_py(nd.array([1, 2, 3])))
self.assertEqual(into([], nd.array([1, 2])),
[1, 2])
self.assertEqual(into([], nd.array([[1, 2], [3, 4]])),
[[1, 2], [3, 4]])
def test_numpy(self):
assert (into(np.array(0), [1, 2]) == np.array([1, 2])).all()
self.assertEqual(into([], np.array([1, 2])),
[1, 2])
def test_type(self):
self.assertEqual(into(list, (1, 2, 3)),
into([], (1, 2, 3)))
self.assertEqual(str(into(np.ndarray, (1, 2, 3))),
str(into(np.ndarray(()), (1, 2, 3))))
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
try:
from blaze.data.python import Python
except ImportError:
Python = None
@skip_if_not(DataFrame and Python)
def test_pandas_data_descriptor():
data = [['Alice', 100], ['Bob', 200]]
schema='{name: string, amount: int}'
dd = Python(data, schema=schema)
result = into(DataFrame, dd)
expected = DataFrame(data, columns=['name', 'amount'])
print(result)
print(expected)
assert str(result) == str(expected)
@skip_if_not(DataFrame and nd.array)
def test_pandas_dynd():
data = [['Alice', 100], ['Bob', 200]]
schema='{name: string, amount: int}'
arr = nd.array(data, dtype=schema)
result = into(DataFrame, arr)
expected = DataFrame(data, columns=['name', 'amount'])
print(result)
print(expected)
assert str(result) == str(expected)
@skip_if_not(DataFrame)
def test_pandas_seq():
assert str(into(DataFrame, [1, 2])) == \
str(DataFrame([1, 2]))
assert str(into(DataFrame, (1, 2))) == \
str(DataFrame([1, 2]))
assert str(into(DataFrame(columns=['a', 'b']), [(1, 2), (3, 4)])) == \
str(DataFrame([[1, 2], [3, 4]], columns=['a', 'b']))
@skip_if_not(DataFrame)
def test_discover_pandas():
data = [['Alice', 100], ['Bob', 200]]
df = DataFrame(data, columns=['name', 'balance'])
print(discover(df))
assert discover(df).subshape[0] == dshape('{name: string, balance: int64}')
@skip_if_not(DataFrame and nd.array)
def test_into_dynd_from_pandas():
data = [('Alice', 100), ('Bob', 200)]
df = DataFrame(data, columns=['name', 'balance'])
result = into(nd.array, df)
assert nd.as_py(result, tuple=True) == data
| bsd-3-clause |
OpenSourcePolicyCenter/multi-country | Python/7CountryAlphaV1/demographicswithclasses2.py | 2 | 15883 | import numpy as np
import csv
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
GENS = 90
FIRST_DEATH_AGE = 68
OLDEST_IMMIGRANTS = 65
FIRST_FERTILITY_YEAR = 23
LAST_FERTILITY_YEAR = 45
FIRST_WORK_YEAR = 21
SKILL_GROUPS = 2
MAX_YEARS = 300
"""
TODO:
#net_migration only goes to age 65?
#Constant migration each year?
#Low skill babies become low skill parents?
#Make some parameter like MAX_YEARS = 300 that is the furthest number of years the model can go
#Look at paper to compare projections past 2050
#Make delta change for each Region, or import it from somewhere
#Make sure all the dimensions of data in here correspond to WorldModel (fertility is different for example)
#Update data readin files format
"""
def Region_information():
"""
********************************************************************************************************************
REGION CLASS DATA TYPES:
self.name: string
-name of the region, eg. "Russia", "EU"
-Used when generically printing a Region and to gather data from the folder Data_files
self.index: integer
-USA = 1, EU = 2, Japan = 3, China = 4, India = 5, Russia = 6, Korea = 7
-Used also to gather data from outside .csv files, specifically from population.csv and net_migration.csv
self.initial_population
-Vector of length GENS+1 for ages 0-GENS+1 that contains exact number of people in each
-Comes from population.csv for specific country
-Note: population.csv is in thousands of people, while self.initial_population is in exact number of people
self.net_migration
-Vector of length 65 for ages 1-65 that has the total number of immigrants for per year for each age group
-Comes from net_migration.csv for specific country
-Note: net_migration.csv is in hundreds of people, while self.net_migration is in exact number of people
self.fertility_rates
        -Has 91 rows for ages 0-90 (nonzero only for the fertile ages 23-45) and 51 columns for years 2008-2058
-Essentially the transpose of the data in self.name_fertility.csv for nonnegative years
self.mortality_rates
-Has 23 rows for ages 68-90 and 51 columns for years 2008-2058
-Essentially the transpose of the data in self.name_mortality.csv
self.skill_distributions
-Vector of length 2 that has the percentage of total population in each skill group
self.population_timepath: np.array((GENS, number of simulated years, SKILL_GROUPS))
-91 rows for ages 0-90, Has 1 column for year 2008, and 2 depth for skill classes 0-1.
-Will add more columns for additional years simulated
-Stores the population size for each generation, year, and skill group
    self.KID_mat: np.array((GENS+1, number of simulated years, SKILL_GROUPS))
-91 rows for ages 0-90, Has 1 column for year 2008, and 2 depth for skill classes 0-1.
-Will add more columns for additional years simulated
-Stores the number of kids each agent of that generation has. This implies the following:
-Ages 0-22 = 0
-Ages 45-68 = A constant because they have kids from their fertile years, but aren't having new kids
-Ages 68-90 = 0
self.timeendowment: np.array(number of simulated years)
-Stores the time endowment for each year. This is h(a, i) in the paper
self.delta: double
        -Technological progress
REGION CLASS FUNCTIONS:
readindata():
newKids(year):
-Creates a vector of kids length 91x2 that is added to self.KID_mat for each year
simulate_demographics(Years):
-Simulates population changes for "Years" years
-Kills old people, makes new babies, adds immigration, gets a new time endowment, and adds new kids and generation to their respective matrices
plot_demographics(year):
-Plots Population distribution, Fertility, Mortality, and Net Migration for the inputed year across ages
plot_population_distribution(years):
-Takes in a list of years and plots the Population Distribution of those years on the same graph
plot_total_population(year):
        -Plots the change in total population over time from 2008 to the given year
get_total_population(year):
        -Returns the change in total population up until the given year
get_population_distribution(year, returnclasses = False):
-Returns the population distribution for a given year. If returnclasses = True it returns the population with the extra dimension for classes
get_fertility_rate(year):
-Returns the fertility rate for a given year
get_mortality_rate(year, returnall = False):
-Returns mortality rate for a given year
-If returnall == True, it returns the whole array of mortality rates
get_total_netmigration_rate():
-Returns the net migration data across age
-As of now it is the same for each year
get_kids_mat(year, returnall = False):
-Returns the distribution of kids taken from KID_mat for the given year
-If returnall == True, it returns the entire KID_mat
******************************************************************************************************************
"""
class Region(object):
def __init__(self, name, index):
self.name = name
self.index = index
self.delta = .01
def readindata():
with open('Data_files/Backup/population.csv','r') as csv_file:
csv_reader=csv.reader(csv_file)
popdata = []
for age in csv_reader:
for region in csv_reader:
popdata.append(region[self.index])
popdata = np.array(popdata).astype(np.float)*1000
with open('Data_files/Backup/skillclasses.csv','r') as csv_file:
csv_reader=csv.reader(csv_file)
skilldata = []
for skill in csv_reader:
for region in csv_reader:
skilldata.append(region[self.index])
skilldata = np.array(skilldata).astype(np.float)
with open("Data_files/Backup/"+(self.name).lower()+"_fertility.csv","r") as csv_file:
csv_reader=csv.reader(csv_file)
fertdata = []
for age in csv_reader:
for year in csv_reader:
fertdata.append(year[1:])
initfertdata = np.zeros((91, 51))
initfertdata[23:46,:] = np.transpose(np.array(fertdata).astype(np.float)[48:,])
fertdata2 = initfertdata
#fertdata = np.transpose(np.array(fertdata).astype(np.float)[48:,])
with open("Data_files/Backup/"+str(self.name).lower()+"_mortality.csv","r") as csv_file:
csv_reader=csv.reader(csv_file)
mortdata = []
for age in csv_reader:
for year in csv_reader:
mortdata.append(year[1:])
initmortdata = np.zeros((91, 51))
initmortdata[68:,:] = np.transpose(np.array(mortdata).astype(np.float))
mortdata2 = initmortdata
mortdata = np.transpose(np.array(mortdata).astype(np.float))
with open('Data_files/Backup/net_migration.csv','r') as csv_file:
csv_reader=csv.reader(csv_file)
migdata = []
for age in csv_reader:
for region in csv_reader:
migdata.append(region[self.index])
initmigdata = np.zeros(91)
initmigdata[:65] = np.array(migdata).astype(np.float)
migdata2 = initmigdata*100
migdata = np.array(migdata).astype(np.float)*100
#Takes the initial population and migration data and gives them a 2nd dimension of length 2, one entry per skill group
popskilldata = np.zeros((GENS+1, SKILL_GROUPS))
migskilldata = np.zeros((OLDEST_IMMIGRANTS, SKILL_GROUPS))
migskilldata2 = np.zeros((GENS+1, SKILL_GROUPS))
for k in range(SKILL_GROUPS):
popskilldata[:,k] = popdata*skilldata[k]
migskilldata[:,k] = migdata*skilldata[k]
migskilldata2[:,k] = migdata2*skilldata[k]
return popskilldata, migskilldata2, fertdata2, mortdata2, skilldata
self.initial_population, self.net_migration, self.fertility_rates, self.mortality_rates, self.skill_distributions = readindata()
self.population_timepath = np.zeros((91,MAX_YEARS+1,2))
self.population_timepath[:,0,:] = self.initial_population
#self.KID_mat = self.newKids(2008).reshape((91, 1, 2))
self.timeendowment = np.ones(1)
def __repr__(self):
return self.name
def newKids(self, year):
if year > 2050:
#We only have fertility rates up to the year 2050
year = 2050
#Column that contains the number of kids each fertile age group has (ages 23-45 in this case)
fertilenumkids = np.cumsum(self.fertility_rates[0:FIRST_FERTILITY_YEAR+1, year-2008])
#Combines the number of kids for age groups 0-22, 23-45, 46-65, and 66-90 into one vector of length 91
numkids = np.hstack((np.zeros(23) , fertilenumkids , np.ones((20))*fertilenumkids[-1] , np.zeros(25)))
#Adding a column of numkids for each skill group
kidsvec = np.tile(numkids.reshape(GENS+1, 1), (1, SKILL_GROUPS))
return kidsvec
def simulate_demographics(self, Years):
Transyear = 50 #Number of years for which we have data. Anything beyond this will use data for the 50th year
for t in xrange(1, Years):
#If the current year t is less than or equal to the greatest year for which we have data (Transyear), use the current year t as the index i for pulling data
if t <= Transyear:
i = t
#If the current year t is beyond the greatest year for which we have data (Transyear), use the most recent year as the index i for pulling data
elif t > Transyear:
i = Transyear
#For each skill group: Shifts the population one generation over, killing all of the oldest generation, and adds immigrants
population = self.population_timepath[:-1,t-1,:] + self.net_migration[:-1,:]
#Gets the survival probability of each generation/skillgroup and manipulates the dimensions for easy multiplication in the next step
survival_rates = np.transpose(np.tile((1-self.mortality_rates[1:,i]),(2,1)))
#Gets the surviving fraction of the population and stores it in the population timepath for the current year t
self.population_timepath[1:,t,:] = np.multiply(population, survival_rates)
#Gets the number of newborns by taking a dot product of the fertility rates and the population
newborns = np.reshape(np.dot(self.fertility_rates[:,i], self.population_timepath[:,i,:]), (1,1,2))
#Stores the number of newborns in generation 0 for the current year t
self.population_timepath[0,t,:] = newborns
def plot_demographics(self, year):
#IMPORTANT!! PLOTS THE SUM OF THE SKILL CLASSES. THAT'S WHAT THE axis=1 IS FOR
num_simulated_years = self.population_timepath.shape[1]
if year - 2008 >= num_simulated_years:
print "\nERROR: WE HAVE ONLY SIMULATED UP TO THE YEAR", num_simulated_years+2008, "AND YOU REQUESTED TO PLOT DATA FOR THE YEAR", year
print"*SEE plot_demog_distribution IN class Region(object)*\n"
time.sleep(10)
return None
year_index = year - 2008
plt.clf()
plt.suptitle(str(self.name+" Data for "+str(year)))
plt.subplot(2, 2, 1)
plt.plot(range(91),self.population_timepath[:,year_index,:].sum(axis=1))
plt.title("Population Distribution")
plt.grid()
plt.subplot(2, 2, 2)
plt.plot(range(23,46),self.get_fertility_rate(year))
plt.xlim(23, 46)
plt.title("Fertility Rates")
plt.grid()
plt.subplot(2, 2, 3)
plt.plot(range(68,91), self.get_mortality_rate(year))
plt.xlim(68, 89)
plt.ylim(0, 1)
plt.title("Mortality Rates")
plt.grid()
plt.subplot(2, 2, 4)
plt.plot(range(65), self.get_total_netmigration_rate())
plt.title("Total Net Migration")
plt.grid()
plt.show()
plt.clf()
def plot_population_distribution(self, years):
years = np.array(years)
for y in range(len(years)):
yeartograph = years[y]
num_simulated_years = self.population_timepath.shape[1]
if yeartograph - 2008 < num_simulated_years:
#IMPORTANT! Plots the sum of the skill classes for each year
plt.plot(range(91), self.population_timepath[:,yeartograph-2008,:].sum(axis=1))
else:
print "\nERROR: WE HAVE ONLY SIMULATED UP TO THE YEAR", num_simulated_years+2008-1, "AND YOU REQUESTED TO PLOT THE YEAR", yeartograph
print"*SEE plot_population_distribution IN class Region(object)*\n"
time.sleep(15)
return None
plt.title(str(self.name + " Population Distribution"))
plt.legend(years)
plt.show()
def plot_total_population(self, year):
totalpopulation = self.population_timepath.sum(axis=0).sum(axis=1)[:year-2008+1]
plt.plot(range(2008, year+1), totalpopulation/1000000)
plt.title(self.name+" Population Change from 2008-"+ str(year))
plt.xlim(2008, year)
plt.xlabel('Year')
plt.ylabel('Population (Millions)')
plt.show()
def get_total_population(self, year):
totalpopulation = self.population_timepath.sum(axis=0).sum(axis=1)[:year-2008+1]
return totalpopulation
def get_population_distribution(self, year, returnclasses = False):
if returnclasses == True:
if year-2008 < self.population_timepath.shape[1]:
return self.population_timepath[:,year-2008,:]
else:
print "We have only calculated up till year", self.population_timepath.shape[0], "so we are returning data for that year"
return self.population_timepath[-1,:]
else: #if returnclasses == False
if year-2008 < self.population_timepath.shape[1]:
return self.population_timepath[:,year-2008,:].sum(axis=1)
else:
print "We have only calculated up till year", self.population_timepath.shape[0], "so we are returning data for that year"
return self.population_timepath[-1,:].sum(axis=1)
def get_fertility_rate(self,year, returnall = False):
if returnall == True:
return self.fertility_rates
if year-2008 < self.fertility_rates.shape[1]:
return self.fertility_rates[:,year-2008]
else:
print "\nThis data is too far out, so we are returning the steady-state value\n"
return self.fertility_rates[:,-1]
def get_mortality_rate(self, year, returnall = False):
if returnall == True:
return self.mortality_rates
if year-2008 < self.mortality_rates.shape[1]:
return self.mortality_rates[:,year-2008]
else:
print "\nThis data is too far out, so we are returning the steady-state value\n"
return self.mortality_rates[:,-1]
def get_total_netmigration_rate(self):
return self.net_migration.sum(axis=1)
def get_kids_mat(self, year, returnall = False):
if returnall == True:
return self.KID_mat
return self.KID_mat[:, year-2008, :]
def compare_countries(countries, year):
plt.clf()
plt.suptitle("Demographics "+str(year))
plt.subplot(1, 2, 1)
for r, region in enumerate(countries):
plt.plot(range(91),region.get_population_distribution(year))
plt.title("Age Distribution")
plt.xlabel('Age')
plt.ylabel('Population')
plt.grid()
plt.subplot(1,2,2)
for r, region in enumerate(countries):
plt.plot(range(2008, year+1), region.get_total_population(year)/1000000)
plt.title("Total Population (Millions)")
plt.xlim(2008, year)
plt.xlabel('Year')
plt.legend(countries, loc = "upper left", prop={'size':11})
plt.grid()
plt.show()
plt.clf()
plt.suptitle(" Data for the Year "+str(year))
#plt.legend(regionlist, loc = "lower right") #legend is added below once the data has been plotted
plt.subplot(2, 2, 1)
for r, region in enumerate(countries):
plt.plot(range(23,46),region.get_fertility_rate(year))
plt.title("Fertility")
plt.xlim(23, 46)
plt.grid()
plt.subplot(2, 2, 2)
for r, region in enumerate(countries):
plt.plot(range(68,91),region.get_mortality_rate(year))
plt.title("Mortality")
plt.xlim(68, 89)
plt.ylim(0, 1)
plt.legend(countries, loc = "upper left", prop={'size':11})
plt.grid()
plt.subplot(2, 2, 3)
for r, region in enumerate(countries):
plt.plot(range(65),region.get_total_netmigration_rate())
plt.title("Net Migration")
plt.grid()
plt.show()
plt.clf()
USA = Region("USA", 1)
EU = Region("EU", 2)
Japan = Region("Japan", 3)
China = Region("China", 4)
India = Region("India", 5)
Russia = Region("Russia", 6)
Korea = Region("Korea", 7)
regionlist = [USA, EU, Japan, China, India, Russia, Korea]
#for index, region in enumerate(regionlist):
#region.simulate_demographics(300)
#Russia.plot_demographics(2058)
Japan.simulate_demographics(300)
Japan.plot_population_distribution([2008, 2138])
print "Done" | mit |
rajeevsingh717/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
"""Computes the median of a Pareto distribution."""
return xmin * pow(2, 1/alpha)
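# A sketch of why the line above is the median (assuming the standard Pareto CDF,
# CDF(x) = 1 - (xmin / x)**alpha for x >= xmin): setting CDF(x) = 0.5 gives
# (xmin / x)**alpha = 0.5, hence x = xmin * 2**(1 / alpha).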
def MakeExpoCdf():
"""Generates a plot of the exponential CDF."""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_expo_cdf',
title='Exponential CDF',
xlabel='x',
ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
"""Reads the babyboom data.
filename: string
returns: DataFrame
"""
var_info = [
('time', 1, 8, int),
('sex', 9, 16, int),
('weight_g', 17, 24, int),
('minutes', 25, 32, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, skiprows=59)
return df
def MakeBabyBoom():
"""Plot CDF of interarrival time on log and linear scales.
"""
# compute the interarrival times
df = ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(cols=2)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.SubPlot(2)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
thinkplot.Save(root='analytic_interarrivals',
legend=False)
def MakeParetoCdf():
"""Generates a plot of the Pareto CDF."""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Save(root='analytic_pareto_cdf',
title='Pareto CDF',
xlabel='x',
ylabel='CDF')
def MakeParetoCdf2():
"""Generates a plot of the CDF of height in Pareto World."""
xmin = 100
alpha = 1.7
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
thinkplot.Plot(xs, ps)
thinkplot.Save(root='analytic_pareto_height',
title='Pareto CDF',
xlabel='height (cm)',
ylabel='CDF',
legend=False)
def MakeNormalCdf():
"""Generates a plot of the normal CDF."""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_normal_cdf',
title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
def MakeNormalModel(weights):
"""Plot the CDF of birthweights with a normal model."""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Save(root='analytic_birthwgt_model',
title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF')
def MakeExampleNormalPlot():
"""Generates a sample normal probability plot.
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = '$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Save(root='analytic_normal_prob_example',
title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
"""Generates a normal probability plot of birth weights."""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Save(root='analytic_birthwgt_normal',
title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
def main():
thinkstats2.RandomSeed(18)
MakeExampleNormalPlot()
# make the analytic CDFs
MakeExpoCdf()
MakeBabyBoom()
MakeParetoCdf()
MakeParetoCdf2()
MakeNormalCdf()
# test the distribution of birth weights for normality
preg = nsfg.ReadFemPreg()
full_term = preg[preg.prglngth >= 37]
weights = preg.totalwgt_lb.dropna()
term_weights = full_term.totalwgt_lb.dropna()
MakeNormalModel(weights)
MakeNormalPlot(weights, term_weights)
if __name__ == "__main__":
main()
| gpl-3.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/projections/__init__.py | 21 | 3371 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes
from .polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = list(six.iterkeys(self._all_projection_types))
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes,
MollweideAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def process_projection_requirements(figure, *args, **kwargs):
"""
Handle the args/kwargs for add_axes/add_subplot/gca,
returning::
(axes_proj_class, proj_class_kwargs, proj_stack_key)
Which can be used for new axes initialization/identification.
.. note:: **kwargs** is modified in place.
"""
ispolar = kwargs.pop('polar', False)
projection = kwargs.pop('projection', None)
if ispolar:
if projection is not None and projection != 'polar':
raise ValueError(
"polar=True, yet projection=%r. "
"Only one of these arguments should be supplied." %
projection)
projection = 'polar'
# ensure that the resolution keyword is always put into the key
# for polar plots
if projection == 'polar':
kwargs.setdefault('resolution', 1)
if isinstance(projection, six.string_types) or projection is None:
projection_class = get_projection_class(projection)
elif hasattr(projection, '_as_mpl_axes'):
projection_class, extra_kwargs = projection._as_mpl_axes()
kwargs.update(**extra_kwargs)
else:
raise TypeError('projection must be a string, None or implement a '
'_as_mpl_axes method. Got %r' % projection)
# Make the key without projection kwargs, this is used as a unique
# lookup for axes instances
key = figure._make_key(*args, **kwargs)
return projection_class, kwargs, key
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
| mit |
UIKit0/marsyas | src/marsyas_python/plot_spectrogram.py | 5 | 4687 | #!/usr/bin/env python
# This utility will plot beautiful spectrograms of your sound files. You will have to specify a lot of parameters,
# but the good news is, the defaults will be set so that it will fit most people's needs.
#
# The parameters you have to set are:
# - Input file name
# - Frame step / Frame length (in samples)
# - Minimum and maximum frequency for analysis
# - Minimum and maximum time for analysis
# - Output width and height
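# Example invocation (illustrative; the flag names and values are taken from the argparse
# definitions below, using their defaults):
#   python plot_spectrogram.py --fname test.wav --flen 2048 --fstep 1024 --minfreq 110 --maxfreq 3000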
import argparse
import marsyas
import marsyas_util
import time
import numpy
import math
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Quickly plot beautiful spectrograms for your audio files.')
parser.add_argument('--fname', dest='Filename', type=str, default='test.wav', help='Filename from where data will be extracted')
parser.add_argument('--flen', dest='Window_len', type=int, default=2048, help='Length (samples) of the window for analysis')
parser.add_argument('--fstep', dest='Window_step', type=int, default=1024, help='Step (samples) of the sliding window used for analysis')
parser.add_argument('--minfreq', dest='Min_freq', type=float, default=110, help='Minimum frequency (Hz) shown in the spectrogram')
parser.add_argument('--maxfreq', dest='Max_freq', type=float, default=3000, help='Maximum frequency (Hz) shown in the spectrogram')
parser.add_argument('--maxtime', dest='Max_time', type=float, default=9000, help='Maximum time (s) shown in the spectrogram')
parser.add_argument('--zeropad', dest='Zero_padding', type=float, default=1, help='Zero padding factor (the DFT is calculated after zero-padding the input to this times the input length - use 1 for standard DFT)')
parser.add_argument('--width', dest='Width', type=int, default=450, help='Width of the plot')
parser.add_argument('--height', dest='Height', type=int, default=200, help='Height of the plot')
parser.add_argument('--window', dest='Window', type=str, default='Hanning', help='Shape of the window that will be used to calculate the spectrogram')
args = parser.parse_args()
# Create our Marsyas network for audio analysis
spec_analyzer = ["Series/analysis", ["SoundFileSource/src", "Sum/summation", "Gain/gain", "ShiftInput/sft", "Windowing/win","Spectrum/spk","PowerSpectrum/pspk", "Memory/mem"]]
net = marsyas_util.create(spec_analyzer)
snet = marsyas_util.mar_refs(spec_analyzer)
# Configure the network
net.updControl(snet["src"]+"/mrs_string/filename", args.Filename)
nSamples = net.getControl(snet["src"]+"/mrs_natural/size").to_natural()
fs = net.getControl(snet["src"]+"/mrs_real/osrate").to_real()
dur = nSamples/fs
print "Opened ", args.Filename
print "It has ", nSamples, " samples at ", fs, " samples/second to a total of ", dur," seconds"
memFs = fs/args.Window_step # Sampling rate of the memory buffer
dur = min(dur, args.Max_time)
memSize = int(dur*memFs)
net.updControl("mrs_natural/inSamples", args.Window_step);
net.updControl(snet["gain"]+"/mrs_real/gain", args.Window_len*1.0); # This will un-normalize the DFT
net.updControl(snet["sft"]+"/mrs_natural/winSize", args.Window_len);
net.updControl(snet["win"]+"/mrs_natural/zeroPadding",args.Window_len * (args.Zero_padding-1));
net.updControl(snet["win"]+"/mrs_string/type", args.Window); # "Hamming", "Hanning", "Triangle", "Bartlett", "Blackman"
net.updControl(snet["pspk"]+"/mrs_string/spectrumType", "logmagnitude2"); # "power", "magnitude", "decibels", "logmagnitude" (for 1+log(magnitude*1000), "logmagnitude2" (for 1+log10(magnitude)), "powerdensity"
net.updControl(snet["mem"]+"/mrs_natural/memSize", memSize)
# Run the network to fill the memory
for i in range(memSize):
net.tick()
# Gather results to a numpy array
out = net.getControl("mrs_realvec/processedData").to_realvec()
DFT_Size = int(len(out)*1.0/memSize)
if numpy.ndim(out)==1:
out = numpy.array([out])
out = numpy.reshape(out,(memSize, DFT_Size))
out = numpy.transpose(out)
# Cut information that we do not want
minK = args.Min_freq*DFT_Size/fs
maxK = args.Max_freq*DFT_Size/fs
out = out[minK:maxK+1]
out = out/numpy.max(out)
out = 1-out
# Plot ALL the numbers!!!
im=plt.imshow(out, aspect='auto', origin='lower', cmap=plt.cm.gray, extent=[0,dur,args.Min_freq,args.Max_freq])
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
fig = plt.gcf()
width_inches = args.Width/80.0
height_inches = args.Height/80.0
fig.set_size_inches((width_inches,height_inches))
plt.savefig('out.png',bbox_inches='tight')
plt.savefig('out.pdf',bbox_inches='tight')
#plt.show()
| gpl-2.0 |
bhargav/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
megan-guidry/dssr2017ABI-mgui210 | InterfaceTest.py | 1 | 6469 | #In this Interface script, an object of class "figureCreate" is created for each of
#the ___ models in the journal article titled _______. The name of each of these objects
#is simply "Figure_" + the Figure number (e.g. Figure_1). Each figureCreate object
#has eight attributes, the first four of which are:
# 1) The Figure number
# 2) The Main Model needed to generate the data within the figure
# 3) The specific cross-bridge model that the main model requires
# 4) The specific Ca2+ model that the main model requires
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
import itertools
import sys
class figureCreate:
def __init__(self, figureNumber, mainModel, xbModel, ca2Model, xVariables, yVariables, ca2Type, caption):
self.figureNumber = figureNumber #This is an integer value
self.mainModel = mainModel #This is a cellml file
self.xbModel = xbModel #This is a cellml file
self.ca2Model = ca2Model #This is a cellml file
self.xVariables = xVariables #This is the CSV column title(s) (e.g. [A, A])
self.yVariables = yVariables #This is the CSV column title(s) (e.g. [B, D])
self.ca2Type = ca2Type #This is either fixed (F) or Dynamic (D)
self.caption = caption
def run_and_plot(figureNumber, fig2Reproduce, contractionType, contractionTypeValues, numberOfFiles):
for i in range(numberOfFiles):
outputDataFiles = os.listdir("Test_Output")
#Create the .csv output data file name (based on object attributes):
dataFile = figureNumber + "_" + str(fig2Reproduce.ca2Type) + "_" + contractionType + str(contractionTypeValues[i]) + ".CSV"
#Determine the path to the "Test_Output" folder so that we know where to look for the output data once it is created:
outputDataPath = os.path.join("Test_Output", dataFile)
print("Creating file: " + dataFile)
#Run the MeganModel (A simulation needs to be run for each iteration of the for loop)
modelVersionsFile = os.listdir("modelVersions")
testConnectionPath = os.path.join("modelVersions", "testConnection.py")
print(testConnectionPath)
exec(open(testConnectionPath).read()) #This line executes a python file located in the modelVersions folder.
xData = []
yData = []
with open(outputDataPath, 'r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
next(plots, None) #Skip the header line
#for row in islice(plots, 542859, 571430, None):
for row in plots:
xData.append(float(row[1])/2.3)
yData.append(float(row[3]))
plt.plot(xData, yData, label='Loaded from file!')
def main():
#The "Figures" array contains a figureCreate object for each figure in the Manuscript
Figures = []
Figures.append(figureCreate(1, "Main", "XB", "Ca2", ["xdata1", "xdata2", "xdata3"], ["ydata1", "ydata2", "ydata3"], "D", "In this figure, work-loop contractions at differing afterlaods and isometric contractions at different sarcomere lengths, are performed"))
Figures[1 - 1].afterloads = [0.12, 0.15, 0.2]
Figures[1 - 1].sarcomereLengths = [1.9359, 2.0139, 2.1054]
###################################################################################################################
######This next chunk of code grabs the appropriate models (based on the user input) and runs them on hpc.#########
#The model version run is based on what figure (Figure_1, Figure_2, etc...) the user wants to replicate.
#Creating a pointer to the proper figureCreate object based on user input
def check_userInput(typed_input):
try:
typed_input = int(typed_input)
return True
except ValueError:
print("Error: You have not input an integer value")
return False
userInput = input("Please enter the figure number you want to reproduce: ")
if not check_userInput(userInput): sys.exit("The figure number must be an integer value")
fig2Reproduce = Figures[int(userInput)-1] #fig2Reproduce is the figureCreate object the code whose attributes include the model names and data values needed to recreate the specific figure
figureNumber = "Figure" + str(userInput)
print("Reproducing " + "Figure " + str(userInput) + ", please wait...")
#Create the .csv output data file name (based on object attributes). This filename, called dataFile, will be imported into the protocol code.
#NAMING CONVENTION: Identify which file to access (which file has the data you need) based on an object's attributes and the matching filename
#To grab the correct file from the Output folder, I need to know:
# 1) the figureNumber
# 2) Fixed or dynamic [Ca2+]i (e.g. F, D) --> this also comes from the model version run
# 3) the contraction type (e.g. WL, Iso, QR) --> this comes from the Model version run
# 4) The afterload value or sarcomere length (e.g. 0.15)
# 5) I also need to know the .CSV columns that hold the data. This information is saved in an object attribute
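# For example (illustrative, using the attribute values set in main()): reproducing Figure 1 with
# dynamic Ca2+ ("D") and a work-loop afterload of 0.12 corresponds to the file "Figure1_D_WL0.12.CSV",
# which is the dataFile string assembled in run_and_plot()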
#How to determine whether a figureCreate object has a .afterloads attribute (indicating work-loops), a .sarcomereLengths attribute (indicating isometric contractions), or both
TorF_WL = hasattr(fig2Reproduce, "afterloads")
TorF_Iso = hasattr(fig2Reproduce, "sarcomereLengths")
if TorF_WL == True:
contractionType = "WL"
contractionTypeValues = fig2Reproduce.afterloads
numberOfFiles = len(contractionTypeValues)
run_and_plot(figureNumber, fig2Reproduce, contractionType, contractionTypeValues, numberOfFiles)
elif TorF_Iso == True:
contractionType = "Iso"
contractionTypeValues = fig2Reproduce.sarcomereLengths
numberOfFiles = len(contractionTypeValues)
run_and_plot(figureNumber, fig2Reproduce, contractionType, contractionTypeValues, numberOfFiles)
#Formatting and displaying the figure:
plt.xlabel("Normalised Sarcomere Length")
plt.ylabel("Normalised Total Force")
plt.title(figureNumber)
plt.axis([0.75, 1, 0, 0.5])
plt.text(.1,.1,fig2Reproduce.caption)
F = plt.show() #plt.show is placed after the loop so that all data plotted in the loop will show on one figure
if __name__ == "__main__":
main()
| apache-2.0 |
Superchicken1/SambaFlow | python/traffic-prediction/src/models/complete_vector/NN.py | 1 | 1277 | from keras.layers import Dense
from keras.models import Sequential
from sklearn import preprocessing
from src.misc.evaluation import mape
import numpy as np
import pandas as pd
x_train = pd.read_csv('train_X.csv', index_col=0)
x_test = pd.read_csv('test_X.csv', index_col=0)
y_train = pd.read_csv('train_Y.csv', index_col=0)
y_test = pd.read_csv('test_Y.csv', index_col=0)
x_dim = len(x_train.columns)
y_dim = len(y_train.columns)
x_train = x_train.as_matrix()
x_test = x_test.as_matrix()
y_train = y_train.as_matrix()
y_test = y_test.as_matrix()
min_max_scaler = preprocessing.MinMaxScaler()
min_max_scaler.fit(np.concatenate((x_train, x_test)))
X_train_scale = min_max_scaler.transform(x_train)
X_test_scale = min_max_scaler.transform(x_test)
model = Sequential()
model.add(Dense(input_dim=x_dim, output_dim=100, activation='relu'))
model.add(Dense(input_dim=100, output_dim=200,activation='relu'))
model.add(Dense(input_dim=200, output_dim=y_dim,activation='relu'))
model.compile(loss='mean_absolute_percentage_error', optimizer='rmsprop')
model.fit(X_train_scale, y_train,
batch_size=1, epochs=50, verbose=2,
validation_data=(X_test_scale, y_test), shuffle=False)
y = model.predict(X_test_scale, batch_size=1)
mape = mape(y, y_test)
print(mape) | apache-2.0 |
tntnatbry/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 5 | 9272 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/__init__.py | 2 | 3135 | """
Machine Learning module in python
=================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.sourceforge.net for complete documentation.
"""
import sys
__version__ = '0.12.1'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
try:
from numpy.testing import nosetester
class _NoseTester(nosetester.NoseTester):
""" Subclass numpy's NoseTester to add doctests by default
"""
def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
>>> from sklearn import test
>>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
"""
return super(_NoseTester, self).test(label=label, verbose=verbose,
extra_argv=extra_argv,
doctests=doctests, coverage=coverage)
try:
test = _NoseTester(raise_warnings="release").test
except TypeError:
# Older versions of numpy do not have a raise_warnings argument
test = _NoseTester().test
del nosetester
except:
pass
__all__ = ['cross_validation', 'cluster', 'covariance',
'datasets', 'decomposition', 'feature_extraction',
'feature_selection', 'semi_supervised',
'gaussian_process', 'grid_search', 'hmm', 'lda', 'linear_model',
'metrics', 'mixture', 'naive_bayes', 'neighbors', 'pipeline',
'preprocessing', 'qda', 'svm', 'test', 'clone', 'pls']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs
"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform()*(2**31-1)
_random_seed = int(_random_seed)
print "I: Seeding RNGs with %r" % _random_seed
np.random.seed(_random_seed)
random.seed(_random_seed)
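# Example (illustrative; assumes the suite is run with nosetests as set up by the NoseTester above):
# running `SKLEARN_SEED=42 nosetests sklearn` makes this fixture seed numpy and the stdlib random
# module with a fixed value instead of a randomly drawn one.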
| agpl-3.0 |
slawler/slawler.github.io | nwm_pages/plts/usgs_for_nwm_datagrabber.py | 1 | 5379 | # -*- coding: utf-8 - Python 3.5.1 *-
"""
Description: Grab Time Series data From USGS Web Service
Input(s) : USGS Gages, Parameters
Output(s) : .rdb time series files
[email protected]
Created on Tue Apr 19 15:08:33 2016
"""
# Import libraries
import pandas as pd
import requests
import json
from datetime import datetime, timedelta
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib import pylab
from matplotlib.dates import DayLocator, HourLocator, DateFormatter
from matplotlib.font_manager import FontProperties
def GetTimeSeries(gage, start, stop ):
#parameter = ["00060","00065"] # Try Flow first
parameter = ["00065","00060"] # Try Stage First
dformat = "json" # Data Format
url = 'http://waterservices.usgs.gov/nwis/iv' # USGS API
# Format Datetime Objects for USGS API
first = datetime.date(start).strftime('%Y-%m-%d')
last = datetime.date(stop).strftime('%Y-%m-%d')
# Ping the USGS API for data
try:
params = OrderedDict([('format',dformat),('sites',gage),('startDT',first),
('endDT',last), ('parameterCD',parameter[0])])
r = requests.get(url, params = params)
print("\nRetrieved Data for USGS Gage: ", gage)
data = r.content.decode()
d = json.loads(data)
mydict = dict(d['value']['timeSeries'][0])
except:
params = OrderedDict([('format',dformat),('sites',gage),('startDT',first),
('endDT',last), ('parameterCD',parameter[1])])
r = requests.get(url, params = params)
print("\nRetrieved Data for USGS Gage: ", gage)
data = r.content.decode()
d = json.loads(data)
mydict = dict(d['value']['timeSeries'][0])
if params['parameterCD'] == '00060':
obser = "StreamFlow"
else:
obser = "Stage"
# Great, We can pull the station name, and assign to a variable for use later:
SiteName = mydict['sourceInfo']['siteName']
print('\n', SiteName)
# After reveiwing the JSON Data structure, select only data we need:
tseries = d['value']['timeSeries'][0]['values'][0]['value'][:]
# Create a Dataframe, format Datetime data,and assign numeric type to observations
df = pd.DataFrame.from_dict(tseries)
df.index = pd.to_datetime(df['dateTime'],format='%Y-%m-%d{}%H:%M:%S'.format('T'))
df['UTC Offset'] = df['dateTime'].apply(lambda x: x.split('-')[3][1])
df['UTC Offset'] = df['UTC Offset'].apply(lambda x: pd.to_timedelta('{} hours'.format(x)))
df.index = df.index - df['UTC Offset']
df.value = pd.to_numeric(df.value)
# Get Rid of unwanted data, rename observed data
df = df.drop('dateTime', 1)
df.drop('qualifiers',axis = 1, inplace = True)
df.drop('UTC Offset',axis = 1, inplace = True)
df = df.rename(columns = {'value':obser})
return df
# Enter Desired Data Download Period
y0, m0 ,d0 = 2004, 10, 6 # Start date (year, month, day)
y1, m1 ,d1 = 2017, 1, 1
# Create Datetime Objects
start = datetime(y0, m0, d0,0)
stop = datetime(y1, m1 ,d1,0)
gage = "01651750" # 'Anacostia, DS Tidal Gage Max'
df_ANAD2 = GetTimeSeries(gage, start, stop)
max_anad = df_ANAD2.idxmax()[0]
gage = "01649500" #'Anacostia, NE Branch'
df_RVDM2 = GetTimeSeries(gage, start, stop)
max_rvdm = df_RVDM2.idxmax()[0]
gage = "01651000" # 'Anacostia, NW Branch'
df_ACOM2 = GetTimeSeries(gage, start, stop)
max_acom = df_ACOM2.idxmax()[0]
#---Set Plotting Window & Station Max ID
curr_plot = 'Anacostia, DS Tidal Gage Max'
pltfrom = max_anad- timedelta(days = 2)
pltto = max_anad + timedelta(days = 2)
curr_plot = 'Anacostia, NW Branch'
pltfrom = max_acom- timedelta(days = 2)
pltto = max_acom + timedelta(days = 2)
plt.interactive(False)
curr_plot = 'Anacostia, NE Branch'
pltfrom = max_rvdm- timedelta(days = 2)
pltto = max_rvdm + timedelta(days = 2)
#--------PLOT
fig, ax = plt.subplots(figsize=(14,8))
#--Plot medium_range NWM
x0 = df_ANAD2[pltfrom :pltto].index
y0 = df_ANAD2[pltfrom :pltto]['Stage']
x1 = df_RVDM2[pltfrom :pltto].index
y1 = df_RVDM2[pltfrom :pltto]['Stage']
x2 = df_ACOM2[pltfrom :pltto].index
y2 = df_ACOM2[pltfrom :pltto]['Stage']
ax.scatter(x0,y0, color='black', label='Anacostia, DS Tidal Gage')
ax.plot(x1,y1, color='r', label='Anacostia, NE Branch')
ax.plot(x2,y2, color='b', label='Anacostia, NW Branch')
ax.set_xlim(pltfrom,pltto)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, scatterpoints = 1)
ax.legend(loc='best', fontsize = 'small')
#--Write in Labels
plt.ylabel('Stage (ft)')
plt.xlabel(pltto.year)
plt.title('Local Max: {}'.format(curr_plot))
#plot_name = os.path.join(root_dir, 'Levee_Seg_{}.png'.format(segment))
plt.grid(True)
plt.gca().xaxis.set_major_formatter(DateFormatter('%I%p\n%a\n%b%d'))
plt.gca().xaxis.set_major_locator(HourLocator(byhour=range(24), interval=12))
plt.savefig(curr_plot+'.png', dpi=600)
| mit |
garnachod/SimpleDoc2Vec | doc2vecClass.py | 1 | 1464 | # classifier
from sklearn.linear_model import LogisticRegression
from gensim.models import Doc2Vec
import numpy
from GeneraVectores import GeneraVectores
from sklearn import svm
from NNet import NeuralNet
if __name__ == '__main__':
model = Doc2Vec.load('./imdb_dbow.d2v')
#print model["TRAIN_POS_8029"]
#exit()
dim = 100
train_arrays = numpy.zeros((25000, dim))
train_labels = numpy.zeros(25000)
generador = GeneraVectores(model)
Pos = generador.getVecsFromFile("data/trainpos.txt")
print "generados vectores Pos"
Neg = generador.getVecsFromFile("data/trainneg.txt")
print "generados vectores Neg"
for i in range(12500):
train_arrays[i] = Pos[i]
train_arrays[12500 + i] = Neg[i]
train_labels[i] = 1
train_labels[12500 + i] = 0
test_arrays = numpy.zeros((25000, dim))
test_labels = numpy.zeros(25000)
Pos = generador.getVecsFromFile("data/testpos.txt")
print "generados vectores Pos TEST"
Neg = generador.getVecsFromFile("data/testneg.txt")
print "generados vectores Neg TEST"
for i in range(12500):
test_arrays[i] = Pos[i]
test_arrays[12500 + i] = Neg[i]
test_labels[i] = 1
test_labels[12500 + i] = 0
classifier = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, penalty='l2', random_state=None, tol=0.0001)
classifier.fit(train_arrays, train_labels)
print "Regresion logistica"
print classifier.score(test_arrays, test_labels)
| gpl-2.0 |
matija94/show-me-the-code | data-science/CollectiveIntelligence/com/machinelearning/inaction/knn.py | 1 | 3026 | '''
Created on Apr 4, 2017
kNN: k Nearest Neighbors
Input: inX: vector to compare to existing dataset (1xN)
dataSet: size m data set of known vectors (NxM)
labels: data set labels (1xM vector)
k: number of neighbors to use for comparison (should be an odd number)
Output: the most popular class label
@author: matija
k nearest neighbors
'''
import numpy as np
import operator
import matplotlib.pyplot as plt
def createDataSet():
'''
for mocking purposes
'''
group = np.array([ [1.0,1.1], [1.0,1.0], [0,0], [0,0.1] ])
labels = ['A', 'A', 'B', 'B']
return group,labels
def file2matix(filename):
fr = open(filename)
numberOfLines = len(fr.readlines())
returnMat = np.zeros((numberOfLines,3))
classLabelVector = []
fr = open(filename)
index = 0
for line in fr.readlines():
line = line.strip()
listFromLine = line.split('\t')
returnMat[index,:] = listFromLine[0:3]
classLabelVector.append(int(listFromLine[-1]))
index+=1
return returnMat,classLabelVector
def autoNorm(dataSet):
'''
if the data has values that lie in different ranges, autoNorm will normalize the data
so each feature is treated 'equally' (scaled from 0 to 1)
uses the formula below to map each value into the 0-1 range:
newVal = (oldVal-minVal)/(max-min)
'''
#min vals from each col in mat
minVals = dataSet.min(0)
#max vals from each col in mat
maxVals = dataSet.max(0)
ranges = maxVals - minVals
normDataSet = np.zeros(np.shape(dataSet))
m = dataSet.shape[0]
normDataSet = dataSet - np.tile(minVals, (m,1))
normDataSet = normDataSet/np.tile(ranges, (m,1))
return normDataSet, ranges, minVals
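# Worked example (illustrative): a feature column with values [10., 20., 40.] has minVal 10 and
# range 30, so autoNorm rescales it to [0.0, 0.333..., 1.0] via (oldVal - minVal) / (max - min)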
def classify0(inX, dataSet, labels, k):
#number of rows in dataSet
dataSetSize = dataSet.shape[0]
#make new mat with same dim as dataSet and values from inX
# and subtract it from dataSet mat
diffMat = np.tile(inX, (dataSetSize,1)) - dataSet
#square mat
sqDiffMat = diffMat**2
#sum mat vectors into vector, using axis 1(means sum elems from same rows), axis 0
# would sum elements from same columns
sqDistances = sqDiffMat.sum(axis=1)
#square root every element in vector sqDistances
distances = sqDistances**0.5 #eq to np.sqrt(sqDistances)
### CODE ABOVE WAS USING EUCLIDEAN DISTANCE FORMULA
#argsort returns the indices that would sort the distances in increasing order
sortedDistIndicies = distances.argsort()
classCount={}
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
#compute list of tuples(label,classCount) in reversed order=>largest to smallest
sortedClassCount = sorted(classCount.iteritems(),
key=operator.itemgetter(1), reverse=True)
return sortedClassCount[0][0]
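# Example usage (illustrative), classifying a new point against the mock data from createDataSet():
# group, labels = createDataSet()
# print classify0([0.9, 0.9], group, labels, 3) # the 3 nearest neighbours vote 2-1 for 'A'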
| mit |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/parser/common.py | 1 | 41697 | # -*- coding: utf-8 -*-
import csv
import os
import platform
import re
import sys
from datetime import datetime
import nose
import numpy as np
from numpy.testing.decorators import slow
from pandas.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import(StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.io.common import DtypeWarning, EmptyDataError, URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# C parser: supports only length-1 decimals
# Python parser: 'decimal' not supported yet
self.assertRaises(ValueError, self.read_csv,
StringIO(data), decimal='')
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_multiple_skts_example(self):
# TODO: Complete this
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11." # noqa
pass
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skip_footer is not supported with the C parser yet
if self.engine == 'python':
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assertRaisesRegexp(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skip_footer=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_duplicate_columns(self):
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
for method in ('read_csv', 'read_table'):
# check default behavior
df = getattr(self, method)(StringIO(data), sep=',')
self.assertEqual(list(df.columns),
['A', 'A.1', 'B', 'B.1', 'B.2'])
df = getattr(self, method)(StringIO(data), sep=',',
mangle_dupe_cols=False)
self.assertEqual(list(df.columns),
['A', 'A', 'B', 'B', 'B'])
df = getattr(self, method)(StringIO(data), sep=',',
mangle_dupe_cols=True)
self.assertEqual(list(df.columns),
['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
# TODO: complete this
df = self.read_csv(StringIO(data)) # noqa
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[
:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
# skip_footer is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/parser/data/salary.table.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(IOError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
s.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"''' # noqa
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
warning_type = False
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
# see gh-3866: if chunks are different types and can't
# be coerced using numerical types, then issue warning.
if self.engine == 'c' and self.low_memory:
warning_type = DtypeWarning
with tm.assert_produces_warning(warning_type):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_integer_overflow_bug(self):
# see gh-2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# see gh-5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(ValueError, self.read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# see gh-3374, gh-6607
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_chunk_begins_with_newline_whitespace(self):
# see gh-10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# see gh-9735: this issue is C parser-specific (bug when
# parsing whitespace and characters at chunk boundary)
if self.engine == 'c':
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = self.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# see gh-10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# see gh-10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_float_parser(self):
# see gh-9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_scientific_no_exponent(self):
# see gh-12215
df = DataFrame.from_items([('w', ['2e']), ('x', ['3E']),
('y', ['42e']), ('z', ['632E'])])
data = df.to_csv(index=False)
for prec in self.float_precision_choices:
df_roundtrip = self.read_csv(
StringIO(data), float_precision=prec)
tm.assert_frame_equal(df_roundtrip, df)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises(OverflowError, self.read_csv,
StringIO(data), converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = self.read_csv(StringIO(str(x)), header=None)
expected = DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# see gh-9535
expected = DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
# 'as_recarray' is not supported yet for the Python parser
if self.engine == 'c':
result = self.read_csv(StringIO('foo,bar\n'),
nrows=10, as_recarray=True)
result = DataFrame(result[2], columns=result[1],
index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
result = next(iter(self.read_csv(
StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# see gh-10728, gh-10548
# With skip_blank_lines = True
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# gh-10728: WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# gh-10548: EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
def test_uneven_lines_with_usecols(self):
# See gh-12203
csv = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10
"""
# make sure that an error is still thrown
# when the 'usecols' parameter is not provided
msg = "Expected \d+ fields in line \d+, saw \d+"
with tm.assertRaisesRegexp(ValueError, msg):
df = self.read_csv(StringIO(csv))
expected = DataFrame({
'a': [0, 3, 8],
'b': [1, 4, 9]
})
usecols = [0, 1]
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
usecols = ['a', 'b']
df = self.read_csv(StringIO(csv), usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_read_empty_with_usecols(self):
# See gh-12493
names = ['Dummy', 'X', 'Dummy_2']
usecols = names[1:2] # ['X']
# first, check to see that the response of
# parser when faced with no provided columns
# throws the correct error, with or without usecols
errmsg = "No columns to parse from file"
with tm.assertRaisesRegexp(EmptyDataError, errmsg):
self.read_csv(StringIO(''))
with tm.assertRaisesRegexp(EmptyDataError, errmsg):
self.read_csv(StringIO(''), usecols=usecols)
expected = DataFrame(columns=usecols, index=[0], dtype=np.float64)
df = self.read_csv(StringIO(',,'), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
expected = DataFrame(columns=usecols)
df = self.read_csv(StringIO(''), names=names, usecols=usecols)
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa
expected = DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# gh-8661, gh-8679: this should ignore six lines including
# lines with trailing whitespace and blank lines
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6],
skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# gh-8983: test skipping set of rows after a row with trailing spaces
expected = DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_raise_on_sep_with_delim_whitespace(self):
# see gh-6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# see gh-9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_regex_separator(self):
# see gh-6607
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
data = ' a b c\n1 2 3 \n4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try: # engines are verbose in different ways
self.read_csv(StringIO(text), verbose=True)
if self.engine == 'c':
self.assertIn('Tokenization took:', buf.getvalue())
self.assertIn('Parser memory cleanup took:', buf.getvalue())
else: # Python engine
self.assertEqual(buf.getvalue(),
'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try: # engines are verbose in different ways
self.read_csv(StringIO(text), verbose=True, index_col=0)
if self.engine == 'c':
self.assertIn('Tokenization took:', buf.getvalue())
self.assertIn('Parser memory cleanup took:', buf.getvalue())
else: # Python engine
self.assertEqual(buf.getvalue(),
'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
if self.engine == 'c':
tm.assertRaises(Exception, self.read_table,
f, squeeze=True, header=None)
else:
result = self.read_table(f, squeeze=True, header=None)
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
| mit |
JohanWesto/receptive-field-models | rf_models/rf_helper.py | 1 | 17339 | #!/usr/bin/python
"""
" @section DESCRIPTION
" Helper functions for training and evaluating RF models
"""
import os
import numpy as np
import cPickle as pickle
from scipy.io import loadmat
from scipy.linalg import toeplitz
from sklearn.neighbors import kneighbors_graph
from numpy.lib import stride_tricks
from operator import mul
from cython.rf_cython import cross_corr_c, cf_mat_der_c
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
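# Hypothetical usage sketch (values are illustrative only):
#   list(drange(0.0, 1.0, 0.25)) == [0.0, 0.25, 0.5, 0.75]
# The stop value itself is excluded, mirroring the built-in range().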
def add_fake_dimension(org_ndarray, time_win_size):
""" Rolls a time window over a vector and extract the window content
Stride_tricks only affect the shape and strides in the array interface.
The memory footprint is therefore equal for both org_ndarray and
fake_ndarray.
Important!!!
The time dimension in X must be along the first dimension (axis=0)
Args:
org_ndarray: vector to roll the window over
time_win_size: window size in vector elements (time dimension)
Returns:
fake_ndarray:
Raises:
"""
n_element = org_ndarray.size
element_size = org_ndarray.itemsize
input_dims = org_ndarray.shape
stride_length = 1
for dims in input_dims[1:]:
stride_length *= dims
org_1darray = org_ndarray.ravel()
shape = (n_element/stride_length - time_win_size + 1,
time_win_size*stride_length)
strides = (stride_length*element_size, element_size)
fake_2darray = stride_tricks.as_strided(org_1darray,
shape=shape,
strides=strides)
new_shape = [shape[0], time_win_size]
for dims in input_dims[1:]:
new_shape.append(dims)
fake_ndarray = fake_2darray.reshape(new_shape)
return fake_ndarray
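# Hypothetical usage sketch (the shapes below are assumptions, not taken from
# the original code): rolling a 5-bin time window over a (100, 8) stimulus
# yields a strided view of shape (96, 5, 8) that shares memory with the input.
#   x = np.random.randn(100, 8)
#   x_win = add_fake_dimension(x, 5)
#   assert x_win.shape == (96, 5, 8)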
def gaussian_field(shape, origin):
""" Generates a multi-dimensional Gaussian field
:param shape:
:param origin:
:return:
"""
cov_inv = np.diag(np.ones(3))
# cov_inv = np.diag([10. / shape[1], 10. / shape[0], 10. / shape[2]])
dim0, dim1, dim2 = np.meshgrid(np.arange(shape[1]) - shape[1] / 2 - origin[1],
np.arange(shape[0]) - shape[0] / 2 - origin[0],
np.arange(shape[2]) - shape[2] / 2 - origin[2])
x = np.vstack([dim0.ravel(), dim1.ravel(), dim2.ravel()])
tmp = (x * np.dot(cov_inv, x)).sum(axis=0)
field = np.exp(-0.5 * tmp).reshape(shape)
field /= field.max()
return field
def smooth_reg_l(shape):
""" Smooth regularization using a n-D discrete Laplace operator
:param shape:
:return reg_l:
"""
shape = [dim for dim in shape if dim > 1]
if len(shape) == 1:
row = np.concatenate([[-2, 1], np.zeros(shape[0] - 2)])
reg_l = toeplitz(row)
reg_l[0, :] = 0
reg_l[-1, :] = 0
else:
if len(shape) == 2:
dim0, dim1 = np.meshgrid(range(shape[1]), range(shape[0]))
dim = np.vstack([dim0.ravel(), dim1.ravel()])
elif len(shape) == 3:
dim0, dim1, dim2 = np.meshgrid(range(shape[1]),
range(shape[0]),
range(shape[2]))
dim = np.vstack([dim0.ravel(), dim1.ravel(), dim2.ravel()])
con_mat = kneighbors_graph(dim.T, 6, mode='distance').toarray()
con_mat[con_mat > 1] = 0
connections_per_node = con_mat.sum(axis=0)
con_mat[con_mat == 1] = -1
con_mat[np.diag_indices_from(con_mat)] = connections_per_node
reg_l = con_mat
return reg_l
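# Illustrative sketch (the 1-D shape is an assumption, not from the original
# code): for shape (5,) the regularizer is the discrete Laplacian with the two
# boundary rows zeroed, so only interior coefficients are penalised:
#   smooth_reg_l((5,))
#   -> [[ 0,  0,  0,  0,  0],
#       [ 1, -2,  1,  0,  0],
#       [ 0,  1, -2,  1,  0],
#       [ 0,  0,  1, -2,  1],
#       [ 0,  0,  0,  0,  0]]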
def field_part_der(x_nd, field, part_idx):
""" Field part derivative in multilinear (separable) models
:param x_nd:
:param field:
:param part_idx:
:return part_der:
"""
n_parts = len(field.parts)
# Create the outer product between non-part_idx parts
cross_idx = range(part_idx) + \
range(part_idx + 1, n_parts)
part_cross = outer_product(field.parts, cross_idx)
# Sum up contributions along other dimensions
x_axes = range(1, part_idx + 1) + \
range(part_idx + 2, 2 + part_cross.ndim)
field_axes = range(len(part_cross.shape))
part_der = np.tensordot(x_nd, part_cross, axes=(x_axes, field_axes))
return part_der
def sta_and_stc(x_2d, y):
""" Calculate the STA and the STC
Args:
x_2d: input array (assumed to have zero mean)
y: output array
Returns:
sta:
stc:
Raise
"""
# Select the spike triggered ensemble
x_2d_ste = x_2d[y.ravel() > 0, :]
# STA
yx_2d_ste = x_2d_ste * y[y > 0, None]
sta = np.sum(yx_2d_ste, axis=0) / y.sum()
# STC
# Remove the STA
x_2d_ste -= sta
yx_2d_ste = x_2d_ste * y[y > 0, None]
stc = np.dot(yx_2d_ste.T, x_2d_ste) / (y.sum()-1)
return sta, stc
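# Hypothetical usage sketch (toy sizes are assumptions): given a mean-centred
# stimulus matrix and a spike-count vector, the STA is the spike-weighted mean
# stimulus and the STC the covariance of the spike-triggered ensemble.
#   x_2d = np.random.randn(1000, 40)
#   x_2d -= x_2d.mean(axis=0)
#   y = np.random.poisson(0.1, size=1000)
#   sta, stc = sta_and_stc(x_2d, y)
#   assert sta.shape == (40,) and stc.shape == (40, 40)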
def get_insignificant_basis(x, y, rf_shape):
# Make a 2D matrix
x_nd = add_fake_dimension(x, rf_shape[0])
x_nd_full = x_nd.copy()
n_samples = x_nd_full.shape[0]
rf_size = reduce(mul, rf_shape)
x_2d = x_nd_full.reshape(n_samples, rf_size)
# Mean center and whiten
x_2d -= x_2d.mean(axis=0)
x_2d /= x_2d.std(axis=0)
_, stc = sta_and_stc(x_2d, y)
eig_val, eig_vec = np.linalg.eig(stc)
sort_idxs = np.argsort(eig_val)
n_zero_val = (np.abs(eig_val) < 1e-10).sum()
middle_idx = (sort_idxs.size - n_zero_val) / 2 + n_zero_val
# insignificant_basis = np.real(eig_vec[:, sort_idxs[middle_idx]])
# rf = insignificant_basis.reshape(rf_shape)
# return rf
rfs = []
for i in range(-2, 3, 1):
insignificant_basis = np.real(eig_vec[:, sort_idxs[middle_idx + i]])
rfs.append(insignificant_basis.reshape(rf_shape))
return rfs
def scale_params(params):
for cf_id in range(len(params.cfs)):
scale_factor = 1 / params.cfs[cf_id].bias
params.rfs[0].field[params.context_map == cf_id] /= scale_factor
params.cfs[cf_id].field *= scale_factor
params.cfs[cf_id].bias *= scale_factor
return params
def outer_product(parts, cross_idx=[]):
""" Calculates an outer product between 1 to 3 vectors
Args:
parts: list with vectors
cross_idx: indices indicating which vectors to multiply
Returns:
part_cross
Raise
Exception if more than three parts
"""
# If cross_idx is empty we use all vectors
if len(cross_idx) == 0:
cross_idx = range(len(parts))
# Outer product between selected vectors
if len(cross_idx) == 1:
part_cross = parts[cross_idx[0]]
elif len(cross_idx) == 2:
if parts[cross_idx[0]].ndim == parts[cross_idx[1]].ndim:
part_cross = np.outer(parts[cross_idx[0]], parts[cross_idx[1]])
else:
part_cross = parts[cross_idx[0]][:, np.newaxis, np.newaxis] * \
parts[cross_idx[1]]
elif len(cross_idx) == 3:
part_cross = parts[cross_idx[0]][:, np.newaxis, np.newaxis] * \
np.outer(parts[cross_idx[1]], parts[cross_idx[2]])
else:
raise Exception("Can only handle max 3 parts")
return part_cross
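# Minimal sketch (the vectors are assumptions): with two 1-D parts the result
# is the ordinary outer product, e.g. a temporal profile times a spectral one.
#   a = np.array([1., 2.])
#   b = np.array([1., 0., -1.])
#   outer_product([a, b]).shape
#   -> (2, 3)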
def inner_product(x_nd, rfs):
""" Calculates the inner product between between multidimensional arrays
This function calculates a generalized multidimensional euclidean inner
product using numpy.tensordot as numpy.dot can't handle multidimensional
matrices. The inner product is calculated for each provided receptive field
and stored columnwise in the matrix inner_product
Args:
x_nd: multidimensional input array
rfs: list with receptive fields
Returns:
inner_product_nd:
Raise
"""
# Stores the inner product from each receptive field in separate columns
inner_product_nd = np.empty([x_nd.shape[0], len(rfs)])
for rf, rf_idx in zip(rfs, range(len(rfs))):
# Inner product
x_axes = range(1, len(x_nd.shape))
rf_axes = range(len(rf.shape))
inner_product_nd[:, rf_idx] = np.tensordot(x_nd,
rf.field,
axes=(x_axes, rf_axes))
# Check whether this is a quadratic filter
if hasattr(rf, 'qn_square') and rf.qn_square:
inner_product_nd[:, rf_idx] *= \
rf.qn_lambda * inner_product_nd[:, rf_idx]
# Add the filter's bias term
inner_product_nd[:, rf_idx] += rfs[rf_idx].bias
return inner_product_nd
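# Equivalence note (a sketch assuming a plain, non-quadratic rf whose field
# matches the trailing axes of x_nd): the tensordot above reduces to a
# flattened dot product,
#   x_nd.reshape(x_nd.shape[0], -1).dot(rf.field.ravel())
# after which the bias term is added, which is handy for sanity checks.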
def cross_corr(x, rf):
""" Calculates the cross-correlation between x and rf
Computes the cross-correlation between x and rf without the need to
create a large input matrix by adding a fake dimension.
The function is a python wrapper for the cython function:
cross_corr_c()
Args:
x: input array
rf: receptive field
Returns:
z: similarity score
Raise
"""
win_size = rf.field.size
stride = reduce(mul, x.shape[1:])
n_vals = x.shape[0] - rf.shape[0] + 1
z = np.empty(n_vals)
z[:] = cross_corr_c(x.ravel(), rf.field.ravel(), n_vals, stride, win_size)
# z += rf.bias
return z
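# Reference sketch (memory-hungry but equivalent, assuming rf.field has shape
# (rf.shape[0],) + x.shape[1:]): materialise the windows and flatten them.
#   x_nd = add_fake_dimension(x, rf.shape[0])
#   z_ref = x_nd.reshape(x_nd.shape[0], -1).dot(rf.field.ravel())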
def cf_mat_der(x, e, rf):
win_size = rf.field.size
stride = reduce(mul, x.shape[1:])
n_vals = x.shape[0] - rf.shape[0] + 1
cf_der_sum = np.zeros(win_size)
cf_der_sum[:] = cf_mat_der_c(x.ravel(), e.ravel(), rf.field.ravel(), n_vals, stride, win_size)
cf_der_sum = cf_der_sum / n_vals
return cf_der_sum
def z_dist(z, y, n_bins):
"""Approximates the similarity score distributions P(z) and P(z|spike)
IMPORTANT!
This function ONLY uses the first two receptive fields in the LN-model
Args:
z: similarity score array
y: spike count array
n_bins: number of bins to use when approximating the distribution
Returns:
p_z: P(z)
p_z_spike: P(z|spike)
z_edges: bin edge values
Raises:
Exception if z has more than two receptive fields (columns)
"""
# The histogram range spans mean - n_std*std to mean + n_std*std
n_std = 3
# scores resulting in one or more spikes
spike_in_bin = (y > 0).ravel() # spike indicator vector
z_spike = z.compress(spike_in_bin, axis=0)
# We use weights to account for situations where an input caused more
# than one spike.
z_edges = []
# One receptive field
if z.shape[1] == 1:
edges = np.linspace(z.mean() - n_std * z.std(),
z.mean() + n_std * z.std(), n_bins - 1)
edges = np.insert(edges, 0, -np.inf)
edges = np.append(edges, np.inf)
# P(z)
z_count, edges = np.histogram(z.ravel(), edges)
# P(z|spike)
weights = y[y > 0]
z_count_spike, edges = np.histogram(z_spike.ravel(),
edges,
weights=weights.ravel())
z_count = z_count[:, None]
z_count_spike = z_count_spike[:, None]
z_edges.append(edges)
# Two receptive fields
elif z.shape[1] >= 2:
edges_row = np.linspace(z[:, 0].mean() - n_std * z[:, 0].std(),
z[:, 0].mean() + n_std * z[:, 0].std(),
n_bins - 1)
edges_row = np.insert(edges_row, 0, -np.inf)
edges_row = np.append(edges_row, np.inf)
edges_col = np.linspace(z[:, 1].mean() - n_std * z[:, 1].std(),
z[:, 1].mean() + n_std * z[:, 1].std(),
n_bins - 1)
edges_col = np.insert(edges_col, 0, -np.inf)
edges_col = np.append(edges_col, np.inf)
# P(z)
z_count, edges_row, edges_col = \
np.histogram2d(z[:, 0].ravel(),
z[:, 1].ravel(),
[edges_row, edges_col])
# P(z|spike)
weights = y[y > 0]
z_count_spike, edges_row, edges_col = \
np.histogram2d(z_spike[:, 0].ravel(),
z_spike[:, 1].ravel(),
[edges_row, edges_col],
weights=weights)
z_edges.append(edges_row)
z_edges.append(edges_col)
if z.shape[1] > 2:
print "Warning! Probability distributions are only evaluated using " \
"the first two filters in LN-models with more than two filters."
p_z = np.float64(z_count) / np.sum(z_count)
p_z_spike = np.float64(z_count_spike) / np.sum(z_count_spike)
# Nudge the last score bin edge to make sure that even the largest
# score falls into the last bin
for dim in range(len(z_edges)):
z_edges[dim][-1] += 1e-10
return p_z, p_z_spike, z_edges
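# Background note (not from the original code): p_z and p_z_spike are the two
# ingredients of the usual LN-model nonlinearity estimate via Bayes' rule,
#   P(spike | z) = P(z | spike) * P(spike) / P(z),
# evaluated bin-wise on the returned z_edges.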
def calculate_r(vec_1, vec_2):
""" Calculates the pearson r correlation coefficient
Args:
vec_1: first vector
vec_2: second vector
Returns:
Raises:
"""
# Make sure the both vectors are one-dimensional
vec_1 = vec_1.ravel()
vec_2 = vec_2.ravel()
# The following should be equal to scipy.stats.pearsonr
r = np.mean((vec_1 - np.mean(vec_1)) * (vec_2 - np.mean(vec_2))) / np.std(vec_1) / np.std(vec_2)
return r
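# Sanity-check sketch (illustrative vectors): this should agree with
# np.corrcoef (and scipy.stats.pearsonr) up to floating-point error.
#   a, b = np.random.randn(100), np.random.randn(100)
#   assert np.allclose(calculate_r(a, b), np.corrcoef(a, b)[0, 1])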
def load_mat_dat_file(file_name):
""" Load simulated or recorded data
:param file_name: file name including path
:return data:
"""
# Separate behaviour for pickled Python *.dat files
if file_name[-3:] == 'dat':
data = pickle.load(open(file_name, 'rb'))
# and Matlab *.mat files
elif file_name[-3:] == 'mat':
data_mat = loadmat(file_name)
data = {'x': np.float64(data_mat['x']),
'x_labels': [label[0] for label in data_mat['x_labels'][0]],
'x_ticks': [ticks.tolist() for ticks in data_mat['x_ticks'][0]],
'y': np.float64(data_mat['y']),
'name': data_mat['name'][0],
'origin': data_mat['origin'][0],
'params': {'dt': data_mat['dt_ms'][0, 0]}
}
else:
raise Exception("Unknown file format: {}".format(file_name[-3:]))
return data
def load_saved_models(load_path, tag=None):
""" Load saved rf models in specified directory
:param load_path:
:return:
"""
models = []
if load_path is not None:
if os.path.isdir(load_path):
contents = os.listdir(load_path)
# Filter by tag
if tag is not None:
contents = [s for s in contents if tag in s]
for file_name in sorted(contents):
# Assume that all *.dat files are saved models
if file_name[-3:] == 'dat':
model = pickle.load(open(load_path + file_name, 'rb'))
models.append(model)
else:
print "Provided model path does not exist!"
else:
print "No model path provided!"
return models
def load_saved_models_old(results_path, result_files=[]):
""" Read pickled models
Args:
results_path: path to results folder
result_files: stored files to read
Returns:
all_fields: rfs and cfs in all files
all_simulation_data: simulation data form all files
Raises:
"""
all_fields = [] # STRF, CF, and r-values
all_simulation_data = [] # Configuration used
# Load all files with a *.dat extension if no file names are provided
if len(result_files) == 0:
for file in os.listdir(results_path):
if file.endswith(".dat"):
result_files.append(file)
for result_file in result_files:
with open(results_path+result_file, 'rb') as handle:
results = pickle.load(handle)
n_models = len(results['models'])
rfs = []
rf_names =[]
cfs = []
cf_names = []
r_train = []
r_test = []
obj_fun_val = []
for i in range(n_models):
name = results['models'][i].name
if name.rfind('_') >= 0:
name = name[0:name.rfind('_')]
else:
name += str(len(results['models'][i].rfs))
for rf in results['models'][i].rfs:
if len(rf) > 0:
# rf_tmp = rf['field']/np.linalg.norm(rf['field'])
rf_tmp = rf['field']
rfs.append(rf_tmp)
rf_names.append(name)
for cf in results['models'][i].cfs:
if len(cf) > 0:
cfs.append(cf['field'][::-1, ::-1, ::-1])
cf_names.append(name)
r_train.append(results['models'][i].r_train)
r_test.append(results['models'][i].r_test)
obj_fun_val.append(results['models'][i].obj_fun_val)
tmp_dic = {'rfs': rfs,
'rf_names': rf_names,
'cfs': cfs,
'cf_names': cf_names,
'r_train': r_train,
'r_test': r_test,
'obj_fun_val': obj_fun_val}
all_fields.append(tmp_dic)
all_simulation_data.append(results['simulation_data'])
return all_fields, all_simulation_data
| mit |
justinglibert/flapai | flapai.py | 1 | 19355 | from itertools import cycle
import random
import sys
import time
import datetime
import logging
import os
import pygame
from pygame.locals import *
import matplotlib.pyplot as plt
import json
from genome import Genome
from network import Network
from population import Population
from config import Config
import pickle
import numpy as np
import subprocess
from colorama import *
today = "save/" + str(datetime.date.today()) + "_" + time.strftime("%X")
if not os.path.exists(today):
os.makedirs(today)
savestat = True
fpsspeed=3
FPS = 4000
bestFitness = 0
fitnessovergeneration = []
fittestovergeneration = []
#detectionOffset = 36
detectionOffset = 40
SCREENWIDTH = 288
SCREENHEIGHT = 512
# amount by which base can maximum shift to left
PIPEGAPSIZE = 100 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
# image and hitmask dicts
IMAGES, HITMASKS = {}, {}
DIEIFTOUCHTOP = True
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
# blue bird
(
# amount by which base can maximum shift to left
'assets/sprites/bluebird-upflap.png',
'assets/sprites/bluebird-midflap.png',
'assets/sprites/bluebird-downflap.png',
),
# yellow bird
(
'assets/sprites/yellowbird-upflap.png',
'assets/sprites/yellowbird-midflap.png',
'assets/sprites/yellowbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
'assets/sprites/background-night.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
'assets/sprites/pipe-red.png',
)
asciiart="""
______ _ ___ _____
| ___| | / _ \|_ _|
| |_ | | __ _ _ __ / /_\ \ | |
| _| | |/ _` | '_ \| _ | | |
| | | | (_| | |_) | | | |_| |_
\_| |_|\__,_| .__/\_| |_/\___/
| |
|_| """
def printc(text,color):
if color == "red":
print(Fore.RED)
if color == "blue":
print(Fore.BLUE)
if color == "green":
print(Fore.GREEN)
print(text)
print(Style.RESET_ALL)
def main():
"""Parse option"""
if len(sys.argv) != 1:
#Evaluate a single genome
if str(sys.argv[1])=="-evaluate":
initPygame("FlapAI: Evaluating")
print str(sys.argv[2])
net = load(str(sys.argv[2]))
genome = Genome(net)
global savestat
savestat = False
fitness = []
for i in xrange(100):
fit = playGame(genome)
fitness.append(fit.fitness)
print "fitness : %s " % fit.fitness
average = sum(fitness) / float(len(fitness))
printc("Average fitness : %s" % average,"red")
pygame.quit()
sys.exit()
#Show the stat of an experiment
if str(sys.argv[1])=="-stats":
showStat(str(sys.argv[2]))
#No argument, starting FlapAI
initPygame("FlapAI: Learning")
"""Init the population"""
bestFitness = 0
population = Population()
population.generateRandomPopulation()
generation = 1
maxgeneration = Config.maxGeneration
lastgenerationaveragefitness = 0
#Main Loop
while generation <= maxgeneration :
birdnmbr = 1
for i in xrange(population.size()):
genome = playGame(population.getGenome(i))
population.setGenomeFitness(i,genome.fitness)
informationforscreen = {
'generation' : generation,
'birdnumber' : birdnmbr,
'lastfitness' : genome.fitness,
'lastgenerationaveragefitness' : lastgenerationaveragefitness,
'bestfitness' : bestFitness
}
updateScreen(informationforscreen)
if genome.fitness > bestFitness:
global bestFitness
bestFitness = genome.fitness
genome.network.save(today + "/bestfitness.json")
birdnmbr += 1
global fitnessovergeneration
fitnessovergeneration.append(population.averageFitness())
lastgenerationaveragefitness = population.averageFitness()
global fittestovergeneration
fittestovergeneration.append(population.findFittest().fitness)
#Evolve the population
population.evolvePopulation()
generation += 1
def initPygame(screencaption):
global SCREEN, FPSCLOCK
pygame.init()
init()
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption(screencaption)
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
def playGame(genome):
"""Info pour le jeux"""
# select random background sprites
randBg = random.randint(0, len(BACKGROUNDS_LIST) - 1)
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = random.randint(0, len(PLAYERS_LIST) - 1)
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = random.randint(0, len(PIPES_LIST) - 1)
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
# hismask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
"""Info pour lancer le jeux sans le message au depart"""
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
playerShmVals = {'val': 0, 'dir': 1}
basex = 0
playerIndexGen = cycle([0, 1, 2, 1])
movementInfo = {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
}
#Update the network with current genes
genome.network.fromgenes(genome.genes)
crashInfo = mainGame(movementInfo,genome)
#fitness = showGameOverScreen(crashInfo)
genome = crashInfo['genome']
if Config.fitnessIsScore:
genome.fitness = crashInfo['score']
if genome.fitness < 0:
genome.fitness = 0
return genome
def mainGame(movementInfo,genome):
score = playerIndex = loopIter = 0
playerIndexGen = movementInfo['playerIndexGen']
playerx, playery = int(SCREENWIDTH * 0.2), movementInfo['playery']
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 200, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 200 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
# player velocity, max velocity, downward acceleration, acceleration on flap
playerVelY = -9 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
playerAccY = 1 # players downward acceleration
playerFlapAcc = -9 # players speed on flapping
playerFlapped = False # True when player flaps
framesurvived = 0
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
#Store Stat
if savestat==True:
reportStat()
pygame.quit()
if savestat==True:
showStat(today)
else:
sys.exit()
if event.type == KEYDOWN and event.key == K_UP:
if fpsspeed < 4:
global fpsspeed
fpsspeed += 1
if event.type == KEYDOWN and event.key == K_DOWN:
if fpsspeed != -2:
global fpsspeed
fpsspeed -= 1
#Evaluate the NN
if playerx < lowerPipes[0]['x'] + detectionOffset:
nextPipe = lowerPipes[0]
else:
nextPipe = lowerPipes[1]
nextPipeY = float(SCREENHEIGHT - nextPipe['y']) / SCREENHEIGHT
playerYcorrectAxis = float(SCREENHEIGHT - playery) / SCREENHEIGHT
distanceBetweenPlayerAndNextPipe = float(nextPipe['x'] - playerx)/ SCREENWIDTH
NNinput = np.array([[playerYcorrectAxis],[nextPipeY]])
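# Note: the network only sees two normalized inputs, the bird's height and
# the top of the next lower pipe (both measured from the screen bottom);
# the horizontal distance computed above is used for the debug overlay only.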
NNoutput = genome.network.feedforward(NNinput)
if NNoutput > 0.5:
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
info = {'playery': playerYcorrectAxis, 'pipey': nextPipeY, 'distance': distanceBetweenPlayerAndNextPipe}
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
upperPipes, lowerPipes)
if crashTest[0] or playery < 5:
genome.fitness = framesurvived
return {
'score': score,
'genome': genome
}
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = playerIndexGen.next()
loopIter = (loopIter + 1) % 30
basex = -((-basex + 100) % baseShift)
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
if playerFlapped:
playerFlapped = False
playerHeight = IMAGES['player'][playerIndex].get_height()
if playery > 5:
playery += min(playerVelY, BASEY - playery - playerHeight)
# move pipes to left
for uPipe, lPipe in zip(upperPipes, lowerPipes):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < upperPipes[0]['x'] < 5:
newPipe = getRandomPipe()
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# remove first pipe if its out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
showScore(score)
if Config.debug:
displayInfo(info)
framesurvived += 1
SCREEN.blit(IMAGES['player'][playerIndex], (playerx, playery))
global FPS
if fpsspeed==4:
#No FPS clock ticking, may be unstable
continue
if fpsspeed==3:
FPS=4000
if fpsspeed==2:
FPS=400
if fpsspeed==1:
FPS=40
if fpsspeed==0:
FPS=30
if fpsspeed==-1:
FPS=15
if fpsspeed==-2:
FPS=3
pygame.display.update()
FPSCLOCK.tick(FPS)
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def displayInfo(info):
###Display useful info : the input for the ANN
myfont = pygame.font.Font(None, 30)
# render text
playery = str(info['playery'])
tubey = str(info['pipey'])
distance = str(info['distance'])
labelplayery = myfont.render(playery,1,(255,255,0))
labeltubey = myfont.render(tubey,1,(0,255,255))
labeldistance = myfont.render(distance,1,(255,255,255))
SCREEN.blit(labelplayery, (SCREENWIDTH / 2 - 100, SCREENHEIGHT * 0.7))
SCREEN.blit(labeltubey, (SCREENWIDTH / 2 - 100, SCREENHEIGHT * 0.8))
SCREEN.blit(labeldistance, (SCREENWIDTH / 2 - 100, SCREENHEIGHT * 0.9))
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in range(image.get_width()):
mask.append([])
for y in range(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
def load(filename):
"""Load a neural network from the file ``filename``. Returns an
instance of Network.
"""
f = open(filename, "r")
data = json.load(f)
f.close()
net = Network(data["sizes"])
net.weights = [np.array(w) for w in data["weights"]]
net.biases = [np.array(b) for b in data["biases"]]
return net
def reportStat():
with open(today + '/fitnessovergeneration.dat', 'wb') as handle:
pickle.dump(fitnessovergeneration, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(today + '/fittestovergeneration.dat', 'wb') as handle:
pickle.dump(fittestovergeneration, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(today + '/bestfitness.dat', 'wb') as handle:
pickle.dump(bestFitness, handle, protocol=pickle.HIGHEST_PROTOCOL)
def updateScreen(info):
#Clear the screen
subprocess.call(["printf", "\033c"])
#Print asciiart
printc(asciiart,"green")
if info["generation"] > 1:
print("----Last generation----")
printc("Average fitness: %s" % str(info["lastgenerationaveragefitness"]), "blue")
print("-----------------------")
if info["birdnumber"] > 1:
printc("Last Fitness: %s" % str(info["lastfitness"]), "green")
printc("Best Fitness: %s" % str(info["bestfitness"]),"red")
print("----Status----")
printc("Generation number : %s/%s" % (str(info["generation"]),str(Config.maxGeneration)),"blue")
printc("Bird number: %s/%s" % (str(info["birdnumber"]), str(Config.numberOfIndividuals)),"blue")
def showStat(folder):
fitnessovergeneration = pickle.load(open(folder + '/fitnessovergeneration.dat', 'rb'))
fittestovergeneration = pickle.load(open(folder + '/fittestovergeneration.dat', 'rb'))
bestfitness = pickle.load(open(folder + '/bestfitness.dat', 'rb'))
#Clear the screen
subprocess.call(["printf", "\033c"])
printc("Statistics of %s" % folder,"blue")
printc("-" * 20,"green")
print "Number of generation: %s" % len(fittestovergeneration)
printc("Best Fitness: %s" % bestFitness,"red")
plt.figure(1)
plt.plot(fittestovergeneration)
plt.plot(fitnessovergeneration)
plt.show()
sys.exit()
if __name__ == '__main__':
main()
| mit |
nomadcube/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
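# Illustration (ours, not from the library): comb2(n) counts unordered pairs,
# e.g. comb2(4) == 6 while comb2(1) == comb2(0) == 0, i.e. the n * (n - 1) / 2
# pair count used throughout the ARI computation below.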
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
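# Worked illustration (ours, not part of the library): for
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 1, 1, 1] the function
# returns
#     [[1, 1],
#      [0, 2]]
# i.e. one true-class-0 sample landed in predicted cluster 0, one in
# cluster 1, and both true-class-1 samples landed in cluster 1.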
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
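# Worked arithmetic (ours) for the docstring example
# adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) ~= 0.57: the contingency
# matrix is [[2, 0], [0, 1], [0, 1]], so sum_comb_c = 1, sum_comb_k = 2,
# sum_comb = 1 and comb(4, 2) = 6, giving prod_comb = 1 * 2 / 6 = 1/3 and
# mean_comb = 3/2, hence ARI = (1 - 1/3) / (3/2 - 1/3) = (2/3) / (7/6)
# = 4/7 ~= 0.571.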
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
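# Worked illustration (ours), matching the v_measure_score docstring example
# below: for labels_true = [0, 0, 1, 1] and labels_pred = [0, 0, 1, 2],
# MI = ln 2, H(labels_true) = ln 2 and H(labels_pred) = 1.5 * ln 2, so
# homogeneity = 1.0, completeness = 2/3 and
# v_measure = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.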
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
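# Worked illustration (ours): with natural logarithms, two identical
# two-cluster labelings such as [0, 0, 1, 1] vs. [0, 0, 1, 1] give the
# contingency matrix [[2, 0], [0, 2]] and
# mutual_info_score(...) == ln 2 ~= 0.6931, which equals the entropy of
# either labeling, so the normalized variant below returns 1.0 for this case.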
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(ie perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
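# Worked illustration (ours): entropy([0, 0, 1, 1]) uses natural logarithms,
# so it returns -(0.5 * ln 0.5 + 0.5 * ln 0.5) = ln 2 ~= 0.6931, while a
# single-class labeling such as entropy([1, 1, 1, 1]) returns 0.0.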
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/tree/plot_tree_regression_multioutput.py | 43 | 1791 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
clf_1 = DecisionTreeRegressor(max_depth=2)
clf_2 = DecisionTreeRegressor(max_depth=5)
clf_3 = DecisionTreeRegressor(max_depth=8)
clf_1.fit(X, y)
clf_2.fit(X, y)
clf_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = clf_1.predict(X_test)
y_2 = clf_2.predict(X_test)
y_3 = clf_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
terkkila/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
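# Reference note (ours, hedged): in this era of scikit-learn the smoothed idf
# is computed roughly as idf(t) = ln((1 + n_samples) / (1 + df(t))) + 1, i.e.
# as if one extra document containing every term had been seen, so even a
# term present in all documents keeps a strictly positive weight -- which is
# what the non-negativity checks above rely on.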
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
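# Reference note (ours): sublinear_tf replaces each raw term frequency tf by
# 1 + ln(tf), so the counts 1, 2 and 3 above map to 1.0, ~1.69 and ~2.10,
# which is exactly the ordering and the (< 2, < 3) bounds asserted in this
# test.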
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are likely
        # to be not informative (either real stop words or extraction
        # artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
chiffa/PolyPharma | bioflow/algorithms_bank/clustering_routines.py | 1 | 13185 | """
Mostly deprecated clustering and data structural analysis routines.
"""
from typing import Tuple
import numpy as np
from matplotlib import pyplot as plt
from scipy import sparse as spmat
from bioflow.annotation_network.knowledge_access_analysis import ref_param_set, \
get_go_interface_instance
from bioflow.configs.main_configs import NewOutputs
from bioflow.utils.dataviz import kde_compute, render_2d_matrix
from bioflow.utils.linalg_routines import cluster_nodes, normalize_laplacian, \
average_off_diag_in_sub_matrix, average_interset_linkage
def local_indexed_select(tri_array, array_column, selection_span):
"""
    Convenience function that selects from tri_array all the columns whose value in the
    row array_column falls within the selection span.
    :param tri_array: the matrix on which we will be performing the selection
    :param array_column: row of tri_array to which the selection span is applied
    :param selection_span: (lower, upper) bounds; only columns inside this span are kept
    :return: the selected columns, or [[0.0, 0.0, 0.0]] if none fall within the span
"""
selector = np.logical_and(
selection_span[0] < tri_array[
array_column, :], tri_array[
array_column, :] < selection_span[1])
if not any(selector):
return np.array([[0.0, 0.0, 0.0]])
decvec = tri_array[:, selector]
return decvec
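# Illustrative usage sketch (not part of the original module); the numbers below are made
# up. The tri-arrays handled here are laid out as (3, N): row 0 holds the current, row 1
# the informativity and row 2 the confusion potential of each node.
#
#   demo_tri_array = np.array([[0.1, 0.5, 0.9],
#                              [1.0, 2.0, 3.0],
#                              [5.0, 15.0, 25.0]])
#   # keep only the columns whose confusion potential (row 2) lies strictly inside (10, 30)
#   selected = local_indexed_select(demo_tri_array, 2, (10, 30))
#   # selected.shape == (3, 2); the first column (confusion potential 5.0) is dropped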
# REFACTOR: Legacy code containing static analysis and clustering logic
def deprectated_show_correlations(
background_curr_deg_conf,
mean_correlations,
eigenvalues,
selector,
true_sample_tri_corr_array,
test_mean_correlation,
eigenvalue,
re_samples,
go_interface_instance=None,
sparse=False,
param_set=ref_param_set,
save_path: NewOutputs = None):
# TODO: there is a lot of repetition depending on which values are the biggest,
    # test-set or real-set ones. In all, we should be able to reduce it to two functions:
# scatterplot and histogram with two sets that should go into the dataviz module
"""
    A general function that compares our sample against random samples of the same size
    and runs statistical tests on whether any of the nodes or functional groups in our
    sample are non-random
:param background_curr_deg_conf: [[current, informativity, confusion_potential], ...] -
characteristics of the random samples
:param mean_correlations: [[cluster size, average internode connection], ...] -
characteristics of clustering random samples with the same parameters
:param eigenvalues: eigenvalues associated to the interconnection matrix of random samples
:param selector: range on which we would like to visually zoom and plot a histogram
:param true_sample_tri_corr_array: [[current, informativity, confusion_potential], ...] -
characteristics of the true sample. If none, nothing happens
:param test_mean_correlation: [[cluster size, average internode connection], ...] -
characteristics of clustering the true sample
:param eigenvalue: eigenvalues associated to the interconnection matrix of the true sample
:param re_samples: how many random samples we analyzed for the default model
:param go_interface_instance:
:param sparse:
:param param_set:
:return:
"""
if go_interface_instance is None:
go_interface_instance = get_go_interface_instance(param_set)
inf_sel = (go_interface_instance.calculate_informativity(selector[0]),
go_interface_instance.calculate_informativity(selector[1]))
fig = plt.figure()
fig.set_size_inches(30, 20)
# trivect: [0, :] - current; [1, :] - informativity; [2, :] - confusion potential
plt.subplot(331)
plt.title('current through nodes')
bins = np.linspace(background_curr_deg_conf[0, :].min(),
background_curr_deg_conf[0, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[0, :].min(),
true_sample_tri_corr_array[0, :].min()),
max(background_curr_deg_conf[0, :].max(),
true_sample_tri_corr_array[0, :].max()),
100)
plt.hist(background_curr_deg_conf[0, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(true_sample_tri_corr_array[0, :],
bins=bins, histtype='step', log=True, color='r')
plt.subplot(332)
plt.title('test current vs pure informativity')
plt.scatter(background_curr_deg_conf[1, :],
background_curr_deg_conf[0, :], color='b', alpha=0.1)
if true_sample_tri_corr_array is not None:
plt.scatter(
true_sample_tri_corr_array[1, :],
true_sample_tri_corr_array[0, :],
color='r', alpha=0.5)
plt.axvspan(inf_sel[0], inf_sel[1], facecolor='0.5', alpha=0.3)
plt.subplot(333)
plt.title('test current v.s. confusion potential')
plt.scatter(background_curr_deg_conf[2, :], background_curr_deg_conf[0, :])
if true_sample_tri_corr_array is not None:
plt.scatter(
true_sample_tri_corr_array[2, :],
true_sample_tri_corr_array[0, :],
color='r', alpha=0.5)
plt.axvspan(selector[0], selector[1], facecolor='0.5', alpha=0.3)
plt.subplot(334)
plt.title('Gaussian KDE current_info')
estimator_function = kde_compute(background_curr_deg_conf[(1, 0), :], 50, re_samples)
current_info_rel = None
if true_sample_tri_corr_array is not None:
# Used to be the way to compute the p-values
current_info_rel = estimator_function(true_sample_tri_corr_array[(1, 0), :])
plt.subplot(335)
plt.title('GO_term pure informativity distribution')
# REFACTOR: this needs to be moved elsewhere - this is a structural analysis
bins = np.linspace(
background_curr_deg_conf[1, :].min(),
background_curr_deg_conf[1, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[1, :].min(),
true_sample_tri_corr_array[1, :].min()),
max(background_curr_deg_conf[1, :].max(),
true_sample_tri_corr_array[1, :].max()),
100)
plt.hist(background_curr_deg_conf[1, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(true_sample_tri_corr_array[1, :],
bins=bins, histtype='step', log=True, color='r')
plt.subplot(336)
plt.title('Density of current in the highlighted area')
bins = np.linspace(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].min(),
local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(
min(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].min(),
local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :].min()),
max(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :].max(),
local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :].max()),
100)
plt.hist(local_indexed_select(background_curr_deg_conf, 2, selector)[0, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(local_indexed_select(true_sample_tri_corr_array, 2, selector)[0, :],
bins=bins, histtype='step', log=True, color='r')
cluster_props = None
plt.subplot(337)
plt.title('Clustering correlation')
# REFACTOR: that's the custering logic to be extracted elsewhere
if not sparse:
# plt.scatter(mean_correlations[0, :], mean_correlations[1, :], color = 'b')
estimator_function = kde_compute(mean_correlations[(0, 1), :], 50, re_samples)
cluster_props = None
if test_mean_correlation is not None:
plt.scatter(test_mean_correlation[0, :],
test_mean_correlation[1, :],
color='k', alpha=0.8)
cluster_props = estimator_function(test_mean_correlation[(0, 1), :])
plt.subplot(338)
plt.title('Eigvals_hist')
# REFACTOR: this needs to be moved elsewhere - this is a structural analysis
if not sparse:
bins = np.linspace(eigenvalues.min(), eigenvalues.max(), 100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(eigenvalues.min(), eigenvalue.min()),
max(eigenvalues.max(), eigenvalue.max()),
100)
plt.hist(eigenvalues, bins=bins, histtype='step', color='b')
if eigenvalue is not None:
plt.hist(eigenvalue.tolist() * 3, bins=bins, histtype='step', color='r')
plt.subplot(339)
plt.title('confusion potential')
bins = np.linspace(background_curr_deg_conf[2, :].min(),
background_curr_deg_conf[2, :].max(),
100)
if true_sample_tri_corr_array is not None:
bins = np.linspace(min(background_curr_deg_conf[2, :].min(),
true_sample_tri_corr_array[2, :].min()),
max(background_curr_deg_conf[2, :].max(),
true_sample_tri_corr_array[2, :].max()),
100)
plt.hist(background_curr_deg_conf[2, :],
bins=bins, histtype='step', log=True, color='b')
if true_sample_tri_corr_array is not None:
plt.hist(true_sample_tri_corr_array[2, :],
bins=bins, histtype='step', log=True, color='r')
# # plt.show()
plt.savefig(save_path.knowledge_network_scatterplot)
# pull the groups corresponding to non-random associations.
return current_info_rel, cluster_props
def deprecated_perform_clustering(inter_node_tension: spmat.csc_matrix,
cluster_number: int,
show: str = 'undefined clustering') -> Tuple[np.array, np.float64,
np.array, np.array]:
"""
    Performs a spectral clustering on the tensions (voltages) between the nodes.
    :param inter_node_tension: mapping from (node1, node2) pairs to the tension between them
    :param cluster_number: number of clusters to build
    :param show: title used when rendering the re-ordered relations matrix; falsy to skip rendering
"""
index_group = list(set([item
for key in inter_node_tension.keys()
for item in key]))
local_index = dict((UP, i) for i, UP in enumerate(index_group))
rev_idx = dict((i, UP) for i, UP in enumerate(index_group))
relations_matrix = spmat.lil_matrix((len(index_group), len(index_group)))
for (UP1, UP2), tension in inter_node_tension.items():
# TODO: change the metric used to cluster the nodes.
relations_matrix[local_index[UP1], local_index[UP2]] = -1.0 / tension
relations_matrix[local_index[UP2], local_index[UP1]] = -1.0 / tension
relations_matrix[local_index[UP2], local_index[UP2]] += 1.0 / tension
relations_matrix[local_index[UP1], local_index[UP1]] += 1.0 / tension
# underlying method is spectral clustering: do we really lie in a good zone for that?
# NOPE - we need a dynamic clusters number
# TODO: change clustering method to a different one
groups = cluster_nodes(relations_matrix, cluster_number)
relations_matrix = normalize_laplacian(relations_matrix)
if relations_matrix.shape[0] < 5:
eigenvals, _ = spmat.linalg.eigsh(relations_matrix, k=2)
elif relations_matrix.shape[0] < 10:
eigenvals, _ = spmat.linalg.eigsh(relations_matrix, k=4)
else:
eigenvals, _ = spmat.linalg.eigsh(relations_matrix)
relations_matrix = - relations_matrix
relations_matrix.setdiag(1)
group_sets = []
group_2_mean_off_diag = []
for i in range(0, cluster_number):
group_selector = groups == i
group_indexes = group_selector.nonzero()[0].tolist()
group_2_mean_off_diag.append(
(tuple(rev_idx[idx] for idx in group_indexes),
len(group_indexes),
average_off_diag_in_sub_matrix(relations_matrix, group_indexes)))
group_sets.append(group_indexes)
remainder = average_interset_linkage(relations_matrix, group_sets)
clustidx = np.array([item for itemset in group_sets for item in itemset])
relations_matrix = relations_matrix[:, clustidx]
relations_matrix = relations_matrix[clustidx, :]
mean_corr_array = np.array([[items, mean_corr]
for _, items, mean_corr in group_2_mean_off_diag])
if show:
render_2d_matrix(relations_matrix.toarray(), name=show, destination='')
return np.array(group_2_mean_off_diag), \
remainder, \
mean_corr_array, \
eigenvals | bsd-3-clause |
pompiduskus/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
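# Illustrative usage sketch (not part of the original module): _iteritems yields the same
# (key, value) pairs on Python 2 and Python 3, for plain dicts or any other Mapping.
#
#   list(_iteritems({'dog': 1.0, 'cat': 2.0}))  # -> [('dog', 1.0), ('cat', 2.0)] (order may vary)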
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
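# Illustrative usage sketch (not part of the original module): hashing raw string tokens.
# With input_type="string" every token is taken as a feature name with an implied value
# of 1, so repeated tokens accumulate in the same column (sign flips aside).
#
#   hasher = FeatureHasher(n_features=16, input_type="string", non_negative=True)
#   X_demo = hasher.transform([["cat", "dog", "cat"], ["bird"]])
#   # X_demo is a 2 x 16 scipy.sparse CSR matrix; barring hash collisions at such a small
#   # table size, the "cat" column of the first row holds 2.0.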
| bsd-3-clause |
JoeJimFlood/NFLPrediction2014 | week_pre_conf.py | 1 | 4840 | import pandas as pd
import matchup
import xlsxwriter
import xlautofit
import xlrd
import sys
import time
import collections
import os
week_timer = time.time()
week_number = 'conf_matrix'
matchups = collections.OrderedDict()
matchups['Matchups'] = [('NE', 'IND'),
('NE', 'SEA'),
('NE', 'GB'),
('IND', 'SEA'),
('IND', 'GB'),
('SEA', 'GB')]
location = os.getcwd().replace('\\', '/')
output_file = location + '/Weekly Forecasts/Week' + str(week_number) + '.xlsx'
for read_data in range(2):
week_book = xlsxwriter.Workbook(output_file)
header_format = week_book.add_format({'align': 'center', 'bold': True, 'bottom': True})
index_format = week_book.add_format({'align': 'right', 'bold': True})
score_format = week_book.add_format({'num_format': '#0', 'align': 'right'})
percent_format = week_book.add_format({'num_format': '#0%', 'align': 'right'})
if read_data:
colwidths = xlautofit.even_widths_single_index(output_file)
for game_time in matchups:
if read_data:
data_book = xlrd.open_workbook(output_file)
data_sheet = data_book.sheet_by_name(game_time)
sheet = week_book.add_worksheet(game_time)
sheet.write_string(1, 0, 'Chance of Winning', index_format)
sheet.write_string(2, 0, 'Expected Score', index_format)
sheet.write_string(3, 0, '2.5th Percentile Score', index_format)
sheet.write_string(4, 0, '10th Percentile Score', index_format)
sheet.write_string(5, 0, '25th Percentile Score', index_format)
sheet.write_string(6, 0, '50th Percentile Score', index_format)
sheet.write_string(7, 0, '75th Percentile Score', index_format)
sheet.write_string(8, 0, '90th Percentile Score', index_format)
sheet.write_string(9, 0, '97.5th Percentile score', index_format)
sheet.freeze_panes(0, 1)
games = matchups[game_time]
for i in range(len(games)):
home = games[i][0]
away = games[i][1]
homecol = 3 * i + 1
awaycol = 3 * i + 2
sheet.write_string(0, homecol, home, header_format)
sheet.write_string(0, awaycol, away, header_format)
if read_data:
sheet.write_number(1, homecol, data_sheet.cell(1, homecol).value, percent_format)
sheet.write_number(1, awaycol, data_sheet.cell(1, awaycol).value, percent_format)
for rownum in range(2, 10):
sheet.write_number(rownum, homecol, data_sheet.cell(rownum, homecol).value, score_format)
sheet.write_number(rownum, awaycol, data_sheet.cell(rownum, awaycol).value, score_format)
else:
results = matchup.matchup(home, away)
probwin = results['ProbWin']
sheet.write_number(1, homecol, probwin[home], percent_format)
sheet.write_number(1, awaycol, probwin[away], percent_format)
home_dist = results['Scores'][home]
away_dist = results['Scores'][away]
sheet.write_number(2, homecol, home_dist['mean'], score_format)
sheet.write_number(2, awaycol, away_dist['mean'], score_format)
sheet.write_number(3, homecol, home_dist['2.5%'], score_format)
sheet.write_number(3, awaycol, away_dist['2.5%'], score_format)
sheet.write_number(4, homecol, home_dist['10%'], score_format)
sheet.write_number(4, awaycol, away_dist['10%'], score_format)
sheet.write_number(5, homecol, home_dist['25%'], score_format)
sheet.write_number(5, awaycol, away_dist['25%'], score_format)
sheet.write_number(6, homecol, home_dist['50%'], score_format)
sheet.write_number(6, awaycol, away_dist['50%'], score_format)
sheet.write_number(7, homecol, home_dist['75%'], score_format)
sheet.write_number(7, awaycol, away_dist['75%'], score_format)
sheet.write_number(8, homecol, home_dist['90%'], score_format)
sheet.write_number(8, awaycol, away_dist['90%'], score_format)
sheet.write_number(9, homecol, home_dist['97.5%'], score_format)
sheet.write_number(9, awaycol, away_dist['97.5%'], score_format)
if i != len(games) - 1:
sheet.write_string(0, 3 * i + 3, ' ')
if read_data:
for colnum in range(sheet.dim_colmax):
sheet.set_column(colnum, colnum, colwidths[sheet.name][colnum])
week_book.close()
print('Week ' + str(week_number) + ' predictions calculated in ' + str(round((time.time() - week_timer) / 60, 2)) + ' minutes') | mit |
xzackli/isocurvature_2017 | analysis/plot_derived_parameters/OLD_make_beta_plots.py | 1 | 4054 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from scipy.stats import gaussian_kde
from pprint import pprint
import sys
import os
from astropy.io import ascii
from astropy.table import vstack
# chainfile = "/Users/zequnl/Installs/montepython_public/chains/example/2016-10-18_10000__1.txt"
# CONFIGURATION -------------
# chainfile = "chains/CDI_2/2016-11-02_1000000__1.txt"
# paramfile = "chains/CDI_2/2016-11-02_1000000_.paramnames"
chainfile = "chains/CDI_2/2016-11-02_1000000__1.txt"
paramfile = "chains/CDI_2/2016-11-02_1000000_.paramnames"
xname = 'P_{II}^1'
yname = 'P_{RI}^1'
options = ""
chainfolder = "chains/CDI_2/"
if len(sys.argv) >= 3:
chainfile = sys.argv[1]
paramfile = sys.argv[2]
print(paramfile)
if len(sys.argv) >= 5:
xname = sys.argv[3]
yname = sys.argv[4]
options = sys.argv[5:]
elif len(sys.argv) == 2:
if sys.argv[1] == "info":
params = np.array(ascii.read('params', delimiter="\t", format="no_header")['col1']).tolist()
print(params)
sys.exit(0)
# ---------------------------
params = np.array(ascii.read(paramfile, delimiter="\t", format="no_header")['col1'])
data_all = None
for filename in os.listdir(chainfolder):
if filename.startswith("201") and filename.endswith(".txt"):
chainfile = os.path.join(chainfolder, filename)
print(chainfile)
data = (ascii.read(chainfile, delimiter="\s"))[300:]
# set up column names (read in from param file)
data['col1'].name = 'acceptance'
data['col2'].name = 'likelihood'
for i in range(3,len(params)+3):
data['col' + str(i)].name = params[i-3]
        if data_all is None:
data_all = data
else:
data_all = vstack( [data_all, data] )
print(len(data), len(data_all))
data = data_all
print(len(data), "rows")
x = data[xname]
y = data[yname]
t = np.array( range(len(x)))
# we look for the -s option, and then find the number afterwards.
# that's where we start
if "-s" in options:
s = int(sys.argv[sys.argv.index("-s")+1])
x = x[s:]
y = y[s:]
prr1 = data['P_{RR}^1']; pii1 = data['P_{II}^1']; pri1 = data['P_{RI}^1'];
prr2 = data['P_{RR}^2']; pii2 = data['P_{II}^2']; pri2 = pri1 * np.sqrt(pii2 * prr2 / (pii1 * prr1))
# make density plot
# sc(x,y)
beta_iso1 = pii1 / (prr1 + pii1)
beta_iso2 = pii2 / (prr2 + pii2)
alpha = pri1 / np.sqrt( pii1 * prr1 )
# spectral index between the two pivot scales: n_{AB} = \log( P_{AB}^2 / P_{AB}^1 ) / \log( k_2 / k_1 )
k1 = 0.002 # Mpc^{-1}
k2 = 0.1 # Mpc^{-1}
nRR = np.log(prr2/prr1) / np.log(k2/k1)
nRI = np.log(pri2/pri1) / np.log(k2/k1)
nII = np.log(pii2/pii1) / np.log(k2/k1)
def denplot( list_data, ax, name="data", lower=0.0, upper=0.25, \
nbins=20, extend=False, extent=0.1, cov=0.2 ):
"""
plot a smoothed histogram
"""
x = np.linspace(lower, upper, 150)
    if extend:
        # mirror the data around zero so the KDE does not fall off at the lower boundary
        # (bools and new_weights are computed but not used further in this old script)
        bools = list_data < extent
        new_list_data = np.hstack( (list_data,-list_data) )
        new_weights = np.hstack( (data['acceptance'], (data['acceptance']) ) )
        density = gaussian_kde(new_list_data)
else:
density = gaussian_kde( list_data )
density.covariance_factor = lambda : cov
density._compute_covariance()
ax.plot( x, density(x), "k--" )
counts, bins = np.histogram( list_data, bins=x, weights=data['acceptance'], density=True )
#ax.plot( x[:-1], counts, "r." )
ax.get_yaxis().set_ticks([])
# ax.set_ylim( 0.0, counts.max() )
ax.set_xlim( lower, upper )
ax.set_xlabel( name )
fig = plt.figure(figsize=(12,3))
ax1 = fig.add_subplot(141)
ax2 = fig.add_subplot(142)
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
denplot( beta_iso1, ax1, r"$\beta_{iso}(k_{low})$", 0.0, 0.25, extend=True )
denplot( beta_iso2, ax2, r"$\beta_{iso}(k_{high})$", 0.0, 0.8, extend=True)
denplot( alpha, ax3, r"$\cos \Delta$", -0.5, 0.5 )
denplot( nII, ax4, r"$n_{II}$", -1.0, 2.8 )
plt.tight_layout()
plt.savefig("../../figures/beta_planck.pdf")
plt.show()
## TESTING
| mit |
TheTimmy/spack | var/spack/repos/builtin/packages/py-deeptools/package.py | 3 | 2155 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDeeptools(PythonPackage):
"""deepTools addresses the challenge of handling the large amounts of data
that are now routinely generated from DNA sequencing centers."""
homepage = "https://pypi.io/packages/source/d/deepTools"
url = "https://pypi.io/packages/source/d/deepTools/deepTools-2.5.2.tar.gz"
version('2.5.2', 'ba8a44c128c6bb1ed4ebdb20bf9ae9c2')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| lgpl-2.1 |
0x0all/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 23 | 5540 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
"""Check binomial deviance loss.
Check against alternative definitions in ESLII.
"""
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
"""Check log odds estimator. """
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
"""Smoke test for init estimators with sample weights. """
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
"""Test if deviance supports sample weights. """
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
aminert/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, DD, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
valix25/H4H | test_data/test2.py | 1 | 2514 | import plotly
import plotly.plotly as py
import pandas as pd
from plotly.widgets import GraphWidget
from IPython.html import widgets
from IPython.display import display, clear_output
import numpy as np
py.sign_in('dswbtest', 'ci8iu7p6wi') #plotly API credentials
food = pd.read_csv("supply.csv")
# Definition of a function that defines the plot settings
def foodmap(year):
year = str(year)
# Properties of the data and how it is displayed
data = [ dict(
type = 'choropleth',
locations = food['Country code'],
z = food[year],
text = food['Country name'],
colorscale = [[0,"rgb(51,160,44)"],
[0.5,"rgb(255,255,51)"],
[1,"rgb(227,26,28)"]],
opacity = 1,
autocolorscale = False,
reversescale = False,
marker = dict(
line = dict (
color = 'rgb(0,0,0)',
width = 0.5
)
),
colorbar = dict(
autotick = True,
title = 'kcal per capita'
),
) ]
# Properties of the plot
layout = dict(
title = 'Food Supply (kcal per capita) in ' + str(year),
geo = dict(
showframe = False,
showcoastlines = False,
showcountry = True,
countrycolor = "rgb(220, 0, 0)",
coastlinecolor = "rgb(220, 0, 0)",
landcolor = "rgb(220, 0, 0)",
projection = dict(
type = 'Mercator',
scale = 1
)
)
)
fig = dict( data=data, layout=layout )
url = py.plot( fig, validate=False, filename='d3-food-map' )
return url
# Graph object
g = GraphWidget(foodmap(1961))
# Definition of a class that will update the graph object
class z_data:
def __init__(self):
self.z = food[str(int(1961))]
def on_z_change(self, name, old_value, new_value):
self.z = food[str(int(new_value))]
self.title = "Food Supply (kcal per capita) in " + str(new_value)
self.replot()
def replot(self):
g.restyle({ 'z': [self.z] })
g.relayout({'title': self.title})
# Interactive object
edu_slider = widgets.IntSlider(min=1961,max=2011,value=1961,step=1)
edu_slider.description = 'Year'
edu_slider.value = 1961
z_state = z_data()
edu_slider.on_trait_change(z_state.on_z_change, 'value')
display(edu_slider)
display(g) | mit |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_04_27_2015_parallel_for_final.py | 1 | 43408 |
# coding: utf-8
# In[5]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[6]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
settings = {}
settings['filename'] = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
settings['fisher_mode'] = 'FisherM1ONLY'# settings['fisher_mode'] = 'FisherM1ONLY'
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['DL'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_COMBO'] = 1
settings['SVM_RBF'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_RBF_COMBO'] = 1
settings['SVM_POLY'] = 0
settings['DL_S'] = 1
settings['DL_U'] = 0
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 5002
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 20000 # change epochs for split net
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0, 0]
filename = settings['filename']
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[6]:
# In[7]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
    def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
        """ Load and organize the data for one ddi family
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
LOO_data['FisherM1'][1]
"""
self.ddi = ddi
self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
self.total_number_of_sequences = self.get_total_number_of_sequences()
self.raw_data = {}
self.positve_negative_number = {}
self.equal_size_data = {}
for seq_no in range(1, self.total_number_of_sequences+1):
self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
try:
#positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(positive_file)
#lines = file_obj.readStripLines()
#import pdb; pdb.set_trace()
count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
count_neg = self.raw_data[seq_no].shape[0] - count_pos
#self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
#assert int(float(lines[0])) == count_pos
self.positve_negative_number[seq_no] = {'numPos': count_pos}
#negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(negative_file)
#lines = file_obj.readStripLines()
#self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
self.positve_negative_number[seq_no]['numNeg'] = count_neg
except Exception,e:
print ddi, seq_no
print str(e)
logger.info(ddi + str(seq_no))
logger.info(str(e))
# get data for equal positive and negative
n_pos = self.positve_negative_number[seq_no]['numPos']
n_neg = self.positve_negative_number[seq_no]['numNeg']
index_neg = range(n_pos, n_pos + n_neg)
random.shuffle(index_neg)
index_neg = index_neg[: n_pos]
positive_examples = self.raw_data[seq_no][ : n_pos, :]
negative_examples = self.raw_data[seq_no][index_neg, :]
self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
    def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
        """ get the leave-one-out training data and the reduced training data
        Parameters:
            seq_no: index of the sequence held out for testing
            fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
    def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
        """ get training data and reduced training data for 10-fold cross-validation
        Parameters:
            train_index: indices of the sequences used for training in this fold
            test_index: indices of the sequences held out for testing in this fold
            fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
    def get_total_number_of_sequences(self):
        """ get total number of sequences in a ddi family
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename, 'float32')
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename, 'float32')
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
            fisher_mode: which feature subset to select ('FisherM1', 'FisherM1ONLY' or 'AAONLY')
Returns:
selected X, y
"""
        y = data[:,-1] # get label
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
            raise ValueError('there is an error in mode: %s' % fisher_mode)
return X, y
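# Illustrative usage sketch (not part of the original script). The family name below is
# hypothetical; the data folder must contain the matching allPairs.txt and
# F0_20_F1_20_Sliding_17_11_*_ouput_*.txt files for it.
#
#   family = DDI_family_base('PF00001_PF00002')
#   (X_loo, y_loo), (X_red, y_red), (X_test, y_test) = \
#       family.get_LOO_training_and_reduced_traing(1, fisher_mode='FisherM1ONLY', reduce_ratio=1)
#   # X_test/y_test come from sequence 1; X_red/y_red are drawn from the remaining sequences.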
# In[7]:
# In[7]:
# In[8]:
import sklearn.preprocessing
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
    newfile = not os.path.isfile('report_' + fname + '.csv')
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if with_auc_score == False:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerow([i for i in arg])
csvfile.close()
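# Illustrative usage sketch (not part of the original script). Each element of `arguments`
# is a tuple laid out as (ddi, seq_no, fisher_mode, method, isTest) followed by the values
# returned by performance_score(...), so it lines up with the header row written above.
# The score names below are placeholders, since performance_score lives in DL_libs.
#
#   demo_scores = {'ACC': 0.91, 'MCC': 0.82}
#   demo_rows = [('PF00001_PF00002', 1, 'FisherM1ONLY', 'SVM', True, 0.91, 0.82)]
#   saveAsCsv(False, 'demo_report', demo_scores, demo_rows)
#   # appends the rows to report_demo_report.csv, writing the header only if the file is new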
def LOO_out_performance_for_all(ddis):
for ddi in ddis:
try:
one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
one_ddi_family.get_LOO_perfermance(settings = settings)
except Exception,e:
print str(e)
            logger.info("There is an error in this ddi: %s" % ddi)
logger.info(str(e))
class LOO_out_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_LOO_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
with_auc_score = settings['with_auc_score']
reduce_ratio = settings['reduce_ratio']
for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
print seq_no
logger.info('sequence number: ' + str(seq_no))
if settings['SVM']:
print "SVM"
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no,fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# Deep learning part
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_epochs'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
# direct deep learning
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = 1500, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if 0:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S']:
# deep learning using split network
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = cal_epochs(settings['training_epochs'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(y_test, test_predicted, with_auc_score), analysis_scr)
# In[9]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
for ddi in ddis:
try:
process_one_ddi_tenfold(ddi)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
def process_one_ddi_tenfold(ddi):
"""A function to waste CPU cycles"""
logger.info('DDI: %s' % ddi)
try:
one_ddi_family = {}
one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
one_ddi_family[ddi].get_ten_fold_crossvalid_perfermance(settings=settings)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_ten_fold_crossvalid_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
with_auc_score = settings['with_auc_score']
reduce_ratio = settings['reduce_ratio']
#for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
#subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
#for subset_no in range(1, 11):
for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
#for train_index, test_index in kf;
print("Subset:", subset_no)
print("Train index: ", train_index)
print("Test index: ", test_index)
#logger.info('subset number: ' + str(subset_no))
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
if settings['SVM']:
print "SVM"
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_POLY']:
print "SVM_POLY"
L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
#### new representation
x = X_train_pre_validation_minmax
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
if settings['SAE_SVM']:
print 'SAE followed by SVM'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF']:
print 'SAE followed by SVM RBF'
x = X_train_pre_validation_minmax
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_COMBO']:
print 'SAE followed by SVM with combo feature'
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_combo, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_combo)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_combo)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SAE_SVM_RBF_COMBO']:
print 'SAE followed by SVM RBF with combo feature'
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_combo, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_combo)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_combo)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['DL_U']:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabeled data'
pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
if settings['DL_S']:
# deep learning using split network
y_test = test_y
print 'deep learning using split network'
# get the new representation for the A half (the first half of the feature vector)
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
training_epochs = 20001
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
new_x_test_minmax_whole, y_test,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
# In[10]:
#LOO_out_performance_for_all(ddis)
#LOO_out_performance_for_all(ddis)
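# Run the ten-fold evaluation for all DDI families in parallel; each of the 8 worker
# processes handles one family at a time via process_one_ddi_tenfold.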
from multiprocessing import Pool
pool = Pool(8)
pool.map(process_one_ddi_tenfold, ddis[:])
pool.close()
pool.join()
# In[25]:
x = logging._handlers.copy()
for i in x:
log.removeHandler(i)
i.flush()
i.close()
| gpl-2.0 |
Erotemic/plottool | plottool_ibeis/interact_annotations.py | 1 | 52275 | """
Interactive tool to draw mask on an image or image-like array.
TODO:
* need concept of subannotation
* need to take options on a right click of an annotation
* add support for arbitrary polygons back in.
* rename species_list to label_list or category_list
* Just use metadata instead of species / category / label
# Need to incorporate parts into metadata
Notes:
3. Change bounding box and update continuously to the original image the
new ANNOTATIONs
2. Make new window and frames inside, double click to pull up normal window
with editing start with just taking in 6 images and ANNOTATIONs
1. ANNOTATION ID number, then list of 4 tuples
python -m utool.util_inspect check_module_usage --pat="interact_annotations.py"
References:
Adapted from matplotlib/examples/event_handling/poly_editor.py
Jan 9 2014: taken from: https://gist.github.com/tonysyu/3090704
CommandLine:
python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
"""
from __future__ import absolute_import, division, print_function
import six
import re
import numpy as np
try:
import vtool_ibeis as vt
except ImportError:
pass
import utool as ut
import itertools as it
import matplotlib as mpl
from six.moves import zip, range
from plottool_ibeis import draw_func2 as df2
from plottool_ibeis import abstract_interaction
print, rrr, profile = ut.inject2(__name__)
DEFAULT_SPECIES_TAG = '____'
# FIX THESE TO BE GENERIC
ACCEPT_SAVE_HOTKEY = None # 'ctrl+a'
ADD_RECTANGLE_HOTKEY = 'ctrl+a' # 'ctrl+d'
ADD_RECTANGLE_FULL_HOTKEY = 'ctrl+f'
DEL_RECTANGLE_HOTKEY = 'ctrl+d' # 'ctrl+r'
TOGGLE_LABEL_HOTKEY = 'ctrl+t'
HACK_OFF_SPECIES_TYPING = True
if HACK_OFF_SPECIES_TYPING:
ADD_RECTANGLE_HOTKEY = 'a' # 'ctrl+d'
ADD_RECTANGLE_FULL_HOTKEY = 'f'
DEL_RECTANGLE_HOTKEY = 'd' # 'ctrl+r'
TOGGLE_LABEL_HOTKEY = 't'
NEXT_IMAGE_HOTKEYS = ['right', 'pagedown']
PREV_IMAGE_HOTKEYS = ['left', 'pageup']
TAU = np.pi * 2
class AnnotPoly(mpl.patches.Polygon, ut.NiceRepr):
"""
Helper to represent an annotation polygon
ibeis --aidcmd='Interact image' --aid=1
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> verts = vt.verts_from_bbox([0, 0, 10, 10])
>>> poly = AnnotPoly(None, 0, verts, 0, '____')
"""
def __init__(poly, ax, num, verts, theta, species, fc=(0, 0, 0),
line_color=(1, 1, 1), line_width=4, is_orig=False,
metadata=None, valid_species=None, manager=None):
super(AnnotPoly, poly).__init__(verts, animated=True, fc=fc, ec='none',
alpha=0)
poly.manager = manager
# Ensure basecoords consistency
poly.basecoords = vt.verts_from_bbox(vt.bbox_from_verts(poly.xy))
#poly.basecoords = poly.xy
poly.num = num
poly.is_orig = is_orig
poly.theta = theta
poly.metadata = metadata
poly.valid_species = valid_species
poly.tab_list = valid_species
# put in previous text and tabcomplete list for autocompletion
poly.tctext = ''
poly.tcindex = 0
poly.anchor_idx = 2
poly.child_polys = {}
# Display stuff that should be removed from constructor
poly.xy = calc_display_coords(poly.basecoords, poly.theta)
poly.lines = poly._make_lines(line_color, line_width)
poly.handle = poly._make_handle_line()
poly.species = species
if ax is not None:
poly.axes_init(ax)
def axes_init(poly, ax):
species = poly.species
metadata = poly.metadata
if isinstance(metadata, ut.LazyDict):
metadata_ = ut.dict_subset(metadata, metadata.cached_keys())
else:
metadata_ = metadata
poly.species_tag = ax.text(
#tagpos[0], tagpos[1],
0, 0,
species,
bbox={'facecolor': 'white', 'alpha': .8},
verticalalignment='top',
)
poly.metadata_tag = ax.text(
0, 0,
#tagpos[0] + 5, tagpos[1] + 80,
ut.repr3(metadata_, nobr=True),
bbox={'facecolor': 'white', 'alpha': .7},
verticalalignment='top',
)
# ???
poly.species_tag.remove() # eliminate "leftover" copies
poly.metadata_tag.remove()
#
poly.update_display_coords()
def move_to_back(poly):
# FIXME: doesn't work exactly
# Probably needs to be done in the context of the other polys
zorder = 0
poly.set_zorder(zorder)
poly.lines.set_zorder(zorder)
poly.handle.set_zorder(zorder)
def __nice__(poly):
return '(num=%r)' % (poly.num)
def add_to_axis(poly, ax):
ax.add_patch(poly)
ax.add_line(poly.lines)
ax.add_line(poly.handle)
def remove_from_axis(poly, ax):
poly.remove()
poly.lines.remove()
poly.handle.remove()
def draw_self(poly, ax, show_species_tags=False, editable=True):
ax.draw_artist(poly)
if not editable and poly.lines.get_marker():
poly.lines.set_marker('')
elif editable and not poly.lines.get_marker():
poly.lines.set_marker('o')
ax.draw_artist(poly.lines)
if editable:
ax.draw_artist(poly.handle)
if editable and show_species_tags:
# Hack to fix matplotlib 1.5 bug
poly.species_tag.figure = ax.figure
poly.metadata_tag.figure = ax.figure
ax.draw_artist(poly.species_tag)
ax.draw_artist(poly.metadata_tag)
def _make_lines(poly, line_color, line_width):
""" verts - list of (x, y) tuples """
_xs, _ys = list(zip(*poly.xy))
color = np.array(line_color)
marker_face_color = line_color
line_kwargs = {'lw': line_width, 'color': color,
'mfc': marker_face_color}
lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
**line_kwargs)
return lines
def _make_handle_line(poly):
_xs, _ys = list(zip(*poly.calc_handle_display_coords()))
line_width = 4
line_color = (0, 1, 0)
color = np.array(line_color)
marker_face_color = line_color
line_kwargs = {'lw': line_width, 'color': color, 'mfc': marker_face_color}
lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
**line_kwargs)
return lines
def calc_tag_position(poly):
r"""
CommandLine:
python -m plottool_ibeis.interact_annotations --test-calc_tag_position --show
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> poly = ut.DynStruct()
>>> poly.basecoords = vt.verts_from_bbox([0, 0, 400, 400], True)
>>> poly.theta = 0
>>> poly.xy = vt.verts_from_bbox([0, 0, 400, 400], True)
>>> tagpos = poly.calc_tag_position()
>>> print('tagpos = %r' % (tagpos,))
"""
points = [[
max(list(zip(*poly.basecoords))[0]),
min(list(zip(*poly.basecoords))[1])
]]
tagpos = rotate_points_around(points, poly.theta, *points_center(poly.xy))[0]
return tagpos
def calc_handle_display_coords(poly):
img_h = poly.manager.img.shape[0]
handle_length = img_h // 32
#MIN_HANDLE_LENGTH = 25
#handle_length = MIN_HANDLE_LENGTH
#handle_length = max(MIN_HANDLE_LENGTH, (h / 4))
cx, cy = points_center(poly.xy)
w, h = vt.get_pointset_extent_wh(np.array(poly.basecoords))
x0, y0 = cx, (cy - (h / 2)) # start at top edge
x1, y1 = (x0, y0 - handle_length)
pts = [(x0, y0), (x1, y1)]
pts = rotate_points_around(pts, poly.theta, cx, cy)
return pts
def update_color(poly, selected=False, editing_parts=False):
if editing_parts:
poly.lines.set_color(df2.PINK)
elif selected:
# Add selected color
sel_color = df2.ORANGE if poly.is_orig else df2.LIGHT_BLUE
poly.lines.set_color(sel_color)
else:
line = poly.lines
line_color = line.get_color()
desel_color = df2.WHITE if poly.is_orig else df2.LIGHTGRAY
if np.any(line_color != np.array(desel_color)):
line.set_color(np.array(desel_color))
def update_lines(poly):
poly.lines.set_data(list(zip(*poly.xy)))
poly.handle.set_data(list(zip(*poly.calc_handle_display_coords())))
def set_species(poly, text):
poly.tctext = text
poly.species_tag.set_text(text)
def increment_species(poly, amount=1):
if len(poly.tab_list) > 0:
tci = (poly.tcindex + amount) % len(poly.tab_list)
poly.tcindex = tci
# All tab is going to do is go through the possibilities
poly.species_tag.set_text(poly.tab_list[poly.tcindex])
def resize_poly(poly, x, y, idx, ax):
"""
Resize a rectangle using idx as the given anchor point. Respects
current rotation.
CommandLine:
python -m plottool_ibeis.interact_annotations --exec-resize_poly --show
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> (h, w) = img.shape[0:2]
>>> x1, y1 = 10, 10
>>> x2, y2 = w - 10, h - 10
>>> coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
>>> x = 3 * w / 4
>>> y = 3 * h / 4
>>> idx = 3
>>> resize_poly(poly, x, y, idx)
>>> update_UI()
>>> import plottool_ibeis as pt
>>> pt.show_if_requested()
"""
# TODO: allow resize by middle click to scale from the center
# the minus one is because the last coordinate is duplicated (by
# matplotlib) to get a closed polygon
tmpcoords = poly.xy[:-1]
idx = idx % len(tmpcoords)
previdx = (idx - 1) % len(tmpcoords)
nextidx = (idx + 1) % len(tmpcoords)
(dx, dy) = (x - poly.xy[idx][0], y - poly.xy[idx][1])
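# What follows (comment ours, not in the original): the corner drag (dx, dy) is
# decomposed into components along the two box edges that meet at the dragged corner;
# the edge lengths to the neighbouring corners are adjusted by those components, and
# the neighbours are re-placed along their original directions from the moved corner.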
# Fudge factor: rotate the reference frame by a quarter turn per corner index
fudge_factor = (idx) * TAU / 4
poly_theta = poly.theta + fudge_factor
polar_idx2prev = polarDelta(tmpcoords[idx], tmpcoords[previdx])
polar_idx2next = polarDelta(tmpcoords[idx], tmpcoords[nextidx])
tmpcoords[idx] = (tmpcoords[idx][0] + dx, tmpcoords[idx][1] + dy)
mag_delta = np.linalg.norm((dx, dy))
theta_delta = np.arctan2(dy, dx)
theta_rot = theta_delta - (poly_theta + TAU / 4)
rotx = mag_delta * np.cos(theta_rot)
roty = mag_delta * np.sin(theta_rot)
polar_idx2prev[0] -= rotx
polar_idx2next[0] += roty
tmpcoords[previdx] = apply_polarDelta(polar_idx2prev, tmpcoords[idx])
tmpcoords[nextidx] = apply_polarDelta(polar_idx2next, tmpcoords[idx])
# rotate the points by -theta to get the "unrotated" points for use as
# basecoords
tmpcoords = rotate_points_around(tmpcoords, -poly.theta,
*points_center(poly.xy))
# ensure the poly is closed, matplotlib might do this, but I'm not sure
# if it preserves the ordering we depend on, even if it does add the
# point
tmpcoords = tmpcoords[:] + [tmpcoords[0]]
dispcoords = calc_display_coords(tmpcoords, poly.theta)
if (check_valid_coords(ax, dispcoords) and check_min_wh(tmpcoords)):
poly.basecoords = tmpcoords
poly.update_display_coords()
def rotate_poly(poly, dtheta, ax):
coords_lis = calc_display_coords(poly.basecoords, poly.theta + dtheta)
if check_valid_coords(ax, coords_lis):
poly.theta += dtheta
poly.update_display_coords()
def move_poly(poly, dx, dy, ax):
new_coords = [(x + dx, y + dy) for (x, y) in poly.basecoords]
coords_list = calc_display_coords(new_coords, poly.theta)
if check_valid_coords(ax, coords_list):
poly.basecoords = new_coords
poly.update_display_coords()
def update_display_coords(poly):
poly.xy = calc_display_coords(poly.basecoords, poly.theta)
tag_pos = poly.calc_tag_position()
poly.species_tag.set_position((tag_pos[0] + 5, tag_pos[1]))
poly.metadata_tag.set_position((tag_pos[0] + 5, tag_pos[1] + 50))
def print_info(poly):
print('poly = %r' % (poly,))
print('poly.tag_text = %r' % (poly.species_tag.get_text(),))
print('poly.metadata = %r' % (poly.metadata,))
def get_poly_mask(poly, shape):
h, w = shape[0:2]
y, x = np.mgrid[:h, :w]
points = np.transpose((x.ravel(), y.ravel()))
verts = poly.xy
path = mpl.path.Path(verts)
mask = path.contains_points(points)
#mask = nxutils.points_inside_poly(points, verts)
return mask.reshape(h, w)
def is_near_handle(poly, xy_pt, max_dist):
line = poly.calc_handle_display_coords()
return is_within_distance_from_line(xy_pt, line, max_dist)
@property
def size(poly):
return vt.bbox_from_verts(poly.xy)[2:4]
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotationInteraction(abstract_interaction.AbstractInteraction):
"""
An interactive polygon editor.
SeeAlso:
ibeis.viz.interact.interact_annotations2
(ensure that any updates here are propagated there)
Args:
verts_list (list) : list of lists of (float, float)
List of (x, y) coordinates used as vertices of the polygon.
"""
# --- Initialization and Figure Widgets
def __init__(self, img, img_ind=None, commit_callback=None,
verts_list=None,
bbox_list=None,
theta_list=None,
species_list=None,
metadata_list=None,
line_width=4, line_color=(1, 1, 1), face_color=(0, 0, 0),
fnum=None, default_species=DEFAULT_SPECIES_TAG,
next_callback=None, prev_callback=None, do_mask=False,
valid_species=[],
**kwargs):
super(AnnotationInteraction, self).__init__(fnum=fnum, **kwargs)
self.valid_species = valid_species
self.commit_callback = commit_callback # commit_callback
self.but_width = .14
#self.but_height = .08
self.next_prev_but_height = .08
self.but_height = self.next_prev_but_height - .01
self.callback_funcs = dict([
('close_event', self.on_close),
('draw_event', self.draw_callback),
('button_press_event', self.on_click),
('button_release_event', self.on_click_release),
('figure_leave_event', self.on_figure_leave),
('key_press_event', self.on_key_press),
('motion_notify_event', self.on_motion),
('pick_event', self.on_pick),
#('resize_event', self.on_resize),
])
self.mpl_callback_ids = {}
self.img = img
self.show_species_tags = True
self.max_dist = 10
def _reinitialize_variables():
self.do_mask = do_mask
self.img_ind = img_ind
self.species_tag = default_species
self.showverts = True
self.fc_default = face_color
self.mouseX = None # mouse X coordinate
self.mouseY = None # mouse Y coordinate
self.ind_xy = None
self._autoinc_polynum = it.count(0) # num polys in image
self._poly_held = False # if any poly is active
self._selected_poly = None # active polygon
self.parent_poly = None  # level of parts hierarchy
self.background = None
# Ensure nothing is down
self.reset_mouse_state()
_reinitialize_variables()
# hack involving exploiting lexical scoping to save defaults for a
# restore operation
self.reinitialize_variables = _reinitialize_variables
try:
self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)
df2.close_figure(self.fig)
except AttributeError:
pass
self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)
self.reinitialize_figure(fnum=self.fnum)
assert verts_list is None or bbox_list is None, 'only one can be specified'
# bbox_list will get converted to verts_list
if verts_list is not None:
bbox_list = vt.bboxes_from_vert_list(verts_list)
if bbox_list is not None:
verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]
if theta_list is None:
theta_list = [0 for _ in verts_list]
if species_list is None:
species_list = [self.species_tag for _ in verts_list]
if metadata_list is None:
metadata_list = [None for _ in verts_list]
# Create the list of polygons
self.handle_polygon_creation(bbox_list, theta_list, species_list, metadata_list)
self._ind = None # the active vert
self._current_rotate_poly = None
self.mpl_callback_ids = {}
self.connect_mpl_callbacks(self.fig.canvas)
self.add_action_buttons()
self.update_callbacks(next_callback, prev_callback)
def reinitialize_figure(self, fnum=None):
self.fig.clear()
self.fig.clf()
#self.fig.cla()
#ut.qflag()
self.fnum = fnum
#print(self.fnum)
ax = df2.gca()
#self.fig.ax = ax
self.ax = ax
df2.remove_patches(self.ax)
df2.imshow(self.img, fnum=fnum)
ax.set_clip_on(False)
ax.set_title(('\n'.join([
'Click and drag to select/move/resize/orient an ANNOTATION',
#'Press enter to clear the species tag of the selected ANNOTATION',
'Press tab to cycle through annotation species',
#'Type to edit the ANNOTATION species (press tab to autocomplete)'
])))
def add_action_buttons(self):
self.append_button(
'Add Annotation\n' + pretty_hotkey_map(ADD_RECTANGLE_HOTKEY),
rect=[0.18, 0.015, self.but_width, self.but_height],
callback=self.add_new_poly
)
# self.append_button(
# 'Add Full Annotation\n' + pretty_hotkey_map(ADD_RECTANGLE_FULL_HOTKEY),
# rect=[0.34, 0.015, self.but_width, self.but_height],
# callback=ut.partial(self.add_new_poly, full=True)
# )
self.append_button(
'Delete Annotation\n' + pretty_hotkey_map(DEL_RECTANGLE_HOTKEY),
rect=[0.50, 0.015, self.but_width, self.but_height],
callback=self.delete_current_poly
)
self.append_button(
'Save and Exit\n' + pretty_hotkey_map(ACCEPT_SAVE_HOTKEY),
rect=[0.66, 0.015, self.but_width, self.but_height],
callback=self.save_and_exit
)
def disconnect_mpl_callbacks(self, canvas):
""" disconnects all connected matplotlib callbacks """
for name, callbackid in six.iteritems(self.mpl_callback_ids):
canvas.mpl_disconnect(callbackid)
self.mpl_callback_ids = {}
def connect_mpl_callbacks(self, canvas):
""" disconnects matplotlib callbacks specified in the
self.mpl_callback_ids dict """
#http://matplotlib.org/1.3.1/api/backend_bases_api.html
# Create callback ids
self.disconnect_mpl_callbacks(canvas)
self.mpl_callback_ids = {
name: canvas.mpl_connect(name, func)
for name, func in six.iteritems(self.callback_funcs)
}
self.fig.canvas = canvas
# --- Updates
def update_callbacks(self, next_callback, prev_callback):
self.prev_callback = prev_callback
self.next_callback = next_callback
# Hack because the callbacks actually need to be wrapped
_next_callback = None if self.next_callback is None else self.next_image
_prev_callback = None if self.prev_callback is None else self.prev_image
self.append_button(
'Previous Image\n' + pretty_hotkey_map(PREV_IMAGE_HOTKEYS),
rect=[0.02, 0.01, self.but_width, self.next_prev_but_height],
callback=_prev_callback,
)
self.append_button(
'Next Image\n' + pretty_hotkey_map(NEXT_IMAGE_HOTKEYS),
rect=[0.82, 0.01, self.but_width, self.next_prev_but_height],
callback=_next_callback,
)
def update_image_and_callbacks(self, img, bbox_list, theta_list,
species_list, metadata_list, next_callback,
prev_callback):
self.disconnect_mpl_callbacks(self.fig.canvas)
for poly in six.itervalues(self.polys):
poly.remove()
self.polys = {}
self.reinitialize_variables()
self.img = img
self.reinitialize_figure(fnum=self.fnum)
self.handle_polygon_creation(bbox_list, theta_list, species_list,
metadata_list)
self.add_action_buttons()
self.draw()
self.connect_mpl_callbacks(self.fig.canvas)
self.update_callbacks(next_callback, prev_callback)
print('[interact_annot] drawing')
self.draw()
self.update_UI()
def _update_poly_colors(self):
for poly in six.itervalues(self.uneditable_polys):
poly.update_color()
for ind, poly in six.iteritems(self.editable_polys):
assert poly.num == ind
selected = poly is self._selected_poly
editing_parts = poly is self.parent_poly
poly.update_color(selected, editing_parts)
self.draw()
def _update_poly_lines(self):
for poly in six.itervalues(self.uneditable_polys):
#self.last_vert_ind = len(poly.xy) - 1
poly.update_lines()
for poly in six.itervalues(self.editable_polys):
self.last_vert_ind = len(poly.xy) - 1
poly.update_lines()
def update_UI(self):
self._update_poly_lines()
self._update_poly_colors()
self.fig.canvas.restore_region(self.background)
self.draw_artists()
self.fig.canvas.blit(self.ax.bbox)
def draw_artists(self):
for poly in six.itervalues(self.uneditable_polys):
poly.draw_self(self.ax, editable=False)
for poly in six.itervalues(self.editable_polys):
poly.draw_self(self.ax, self.show_species_tags)
# --- Data Maintenance / Other
@property
def uneditable_polys(self):
if self.in_edit_parts_mode:
return {self.parent_poly.num: self.parent_poly}
#return self.polys
else:
return {}
@property
def editable_polys(self):
#return self.polys
if self.in_edit_parts_mode:
return self.parent_poly.child_polys
else:
if self.polys is None:
self.polys = {}
return self.polys
def get_poly_under_cursor(self, x, y):
"""
get the index of the vertex under cursor if within max_dist tolerance
"""
# Remove any deleted polygons
poly_dict = {k: v for k, v in self.editable_polys.items() if v is not None}
if len(poly_dict) > 0:
poly_inds = list(poly_dict.keys())
poly_list = ut.take(poly_dict, poly_inds)
# Put polygon coords into figure space
poly_pts = [poly.get_transform().transform(np.asarray(poly.xy))
for poly in poly_list]
# Find the nearest vertex from the annotations
ind_dist_list = [vt.nearest_point(x, y, polypts)
for polypts in poly_pts]
dist_lists = ut.take_column(ind_dist_list, 1)
min_idx = np.argmin(dist_lists)
sel_polyind = poly_inds[min_idx]
sel_vertx, sel_dist = ind_dist_list[min_idx]
# Ensure nearest distance is within threshold
if sel_dist >= self.max_dist ** 2:
sel_polyind, sel_vertx = (None, None)
else:
sel_polyind, sel_vertx = (None, None)
return sel_polyind, sel_vertx
def get_most_recently_added_poly(self):
if len(self.editable_polys) == 0:
return None
else:
# most recently added polygon has the highest index
poly_ind = max(list(self.editable_polys.keys()))
return self.editable_polys[poly_ind]
def new_polygon(self, verts, theta, species, fc=(0, 0, 0),
line_color=(1, 1, 1), line_width=4, is_orig=False,
metadata=None):
""" verts - list of (x, y) tuples """
# create new polygon from verts
num = six.next(self._autoinc_polynum)
poly = AnnotPoly(ax=self.ax, num=num, verts=verts, theta=theta,
species=species, fc=fc, line_color=line_color,
line_width=line_width, is_orig=is_orig,
metadata=metadata, valid_species=self.valid_species,
manager=self)
poly.set_picker(self.is_poly_pickable)
return poly
def handle_polygon_creation(self, bbox_list, theta_list, species_list,
metadata_list):
""" Maintain original input """
assert bbox_list is not None
if theta_list is None:
theta_list = [0.0 for _ in range(len(bbox_list))]
if species_list is None:
species_list = ['' for _ in range(len(bbox_list))]
assert len(bbox_list) == len(theta_list), 'inconsistent bbox/theta lengths'
assert len(bbox_list) == len(species_list), 'inconsistent bbox/species lengths'
assert len(bbox_list) == len(metadata_list), 'inconsistent bbox/metadata lengths'
self.original_indices = list(range(len(bbox_list)))
self.original_bbox_list = bbox_list
self.original_theta_list = theta_list
self.original_species_list = species_list
self.original_metadata_list = metadata_list
# Convert bboxes to vertices
verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]
for verts in verts_list:
verts = np.array(verts)
for vert in verts:
enforce_dims(self.ax, vert)
# Create polygons
poly_list = [self.new_polygon(verts_, theta, species, is_orig=True,
metadata=metadata)
for (verts_, theta, species, metadata) in
zip(verts_list, theta_list, species_list, metadata_list)]
self.polys = {poly.num: poly for poly in poly_list}
if len(self.polys) != 0:
# Select poly with largest area
wh_list = np.array([poly.size for poly in six.itervalues(self.polys)])
poly_index = list(self.polys.keys())[wh_list.prod(axis=1).argmax()]
self._selected_poly = self.polys[poly_index]
self._update_poly_colors()
self._update_poly_lines()
else:
self._selected_poly = None
# Add polygons to the axis
for poly in six.itervalues(self.polys):
poly.add_to_axis(self.ax)
# Give polygons mpl change callbacks
#for poly in six.itervalues(self.polys):
# poly.add_callback(self.poly_changed)
# --- Actions
def add_new_poly(self, event=None, full=False):
""" Adds a new annotation to the image """
if full:
(h, w) = self.img.shape[0:2]
x1, y1 = 1, 1
x2, y2 = w - 1, h - 1
coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
else:
if self._selected_poly is not None:
defaultshape_polys = {
self._selected_poly.num:
self._selected_poly
}
else:
defaultshape_polys = self.editable_polys
coords = default_vertices(self.img, defaultshape_polys,
self.mouseX, self.mouseY)
poly = self.new_polygon(verts=coords, theta=0,
species=self.species_tag)
poly.parent = self.parent_poly
# Add to the correct place in current hierarchy
self.editable_polys[poly.num] = poly
poly.add_to_axis(self.ax)
#self.polys[poly.num] = poly
#poly.add_callback(self.poly_changed)
self._ind = None # the active vert
self._selected_poly = self.get_most_recently_added_poly()
self._update_poly_lines()
self._update_poly_colors()
self.draw()
def delete_current_poly(self, event=None):
"""
Removes an annotation
"""
if self._selected_poly is None:
print('[interact_annot] No polygon selected to delete')
else:
print('[interact_annot] delete annot')
poly = self._selected_poly
#self.polys.pop(poly.num)
del self.editable_polys[poly.num]
# remove the poly from the figure itself
poly.remove_from_axis(self.ax)
#reset anything that has to do with current poly
self._selected_poly = self.get_most_recently_added_poly()
self._poly_held = False
if self._selected_poly is not None:
self._update_poly_colors()
self.draw()
def edit_poly_parts(self, poly):
if poly is None and self.parent_poly is not None:
self._selected_poly = self.parent_poly
print('self.parent_poly = %r' % (self.parent_poly,))
self.parent_poly = poly
if poly is not None:
self._selected_poly = self.get_most_recently_added_poly()
print('self._selected_poly = %r' % (self._selected_poly,))
if poly is None:
self.ax.imshow(vt.convert_colorspace(self.img, 'RGB'))
else:
# Mask the part of the image not belonging to the annotation
mask = poly.get_poly_mask(self.img.shape)
masked_img = apply_mask(self.img, mask)
self.ax.imshow(vt.convert_colorspace(masked_img, 'RGB'))
self._update_poly_colors()
@property
def in_edit_parts_mode(self):
return self.parent_poly is not None
def toggle_species_label(self):
print('[interact_annot] toggle_species_label()')
self.show_species_tags = not self.show_species_tags
self.update_UI()
def save_and_exit(self, event, do_close=True):
"""
The Save and Exit Button
write a callback to redraw viz for bbox_list
"""
print('[interact_annot] Pressed Accept Button')
def _get_annottup_list():
annottup_list = []
indices_list = []
#theta_list = []
for poly in six.itervalues(self.polys):
assert poly is not None
index = poly.num
bbox = tuple(map(int, vt.bbox_from_verts(poly.basecoords)))
theta = poly.theta
species = poly.species_tag.get_text()
annottup = (bbox, theta, species)
indices_list.append(index)
annottup_list.append(annottup)
return indices_list, annottup_list
def _send_back_annotations():
print('[interact_annot] _send_back_annotations')
indices_list, annottup_list = _get_annottup_list()
# Delete if index is in original_indices but not in indices_list
deleted_indices = list(set(self.original_indices) -
set(indices_list))
changed_indices = []
unchanged_indices = [] # sanity check
changed_annottups = []
new_annottups = []
original_annottup_list = list(zip(self.original_bbox_list,
self.original_theta_list,
self.original_species_list))
for index, annottup in zip(indices_list, annottup_list):
# If the index is not in the originals then it is new
if index not in self.original_indices:
new_annottups.append(annottup)
else:
if annottup not in original_annottup_list:
changed_annottups.append(annottup)
changed_indices.append(index)
else:
unchanged_indices.append(index)
self.commit_callback(unchanged_indices, deleted_indices,
changed_indices, changed_annottups,
new_annottups)
if self.commit_callback is not None:
_send_back_annotations()
# Make mask from selection
if self.do_mask is True:
self.fig.clf()
self.ax = ax = self.fig.add_subplot(111)
mask_list = [poly.get_poly_mask(self.img.shape)
for poly in six.itervalues(self.polys)]
if len(mask_list) == 0:
print('[interact_annot] No polygons to make mask out of')
return 0
mask = mask_list[0]
for mask_ in mask_list:
mask = np.maximum(mask, mask_)
#mask = self.get_poly_mask()
# User must close previous figure
# Modify the image with the mask
masked_img = apply_mask(self.img, mask)
# show the modified image
ax.imshow(masked_img)
ax.set_title('Region outside of mask is darkened')
ax.figure.show()
return
print('[interact_annot] Accept Over')
if do_close:
df2.close_figure(self.fig)
# --- Connected Slots and Callbacks
def next_image(self, event):
if self.next_callback is not None:
self.next_callback()
def prev_image(self, event):
if self.prev_callback is not None:
self.prev_callback()
def start(self):
# FIXME: conform to abstract_interaction start conventions
#self._ensure_running()
#self.show_page()
self.show()
def show(self):
self.draw()
self.bring_to_front()
def draw_callback(self, event):
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
self.draw_artists()
def _show_poly_context_menu(self, event):
def _make_options():
metadata = self._selected_poly.metadata
options = []
options += [
#('Foo: ', ut.partial(print, 'bar')),
#('Move to back ', self._selected_poly.move_to_back),
('PolyInfo: ', self._selected_poly.print_info),
]
if isinstance(metadata, ut.LazyDict):
options += metadata.nocache_eval('annot_context_options')
return options
options = _make_options()
self.show_popup_menu(options, event)
def is_poly_pickable(self, artist, event):
if artist.num in self.editable_polys:
mouse_xy = event.x, event.y
hit = artist.contains_point(mouse_xy)
else:
hit = False
#import utool
#utool.embed()
props = {'dblclick': event.dblclick}
return hit, props
def on_pick(self, event):
""" Makes selected polygon translucent """
if self.debug > 0 or True:
print('[interact_annot] on_pick')
if not self._poly_held:
artist = event.artist
print('[interact_annot] picked artist = %r' % (artist,))
self._selected_poly = artist
self._poly_held = True
if event.dblclick and not self.in_edit_parts_mode:
self.edit_poly_parts(self._selected_poly)
pass
#x, y = event.mouseevent.xdata, event.mouseevent.xdata
def on_click(self, event):
"""
python -m ibeis.viz.interact.interact_annotations2 --test-ishow_image2 --show
"""
super(AnnotationInteraction, self).on_click(event)
if self._ind is not None:
self._ind = None
return
if not self.showverts:
return
if event.inaxes is None:
return
if len(self.editable_polys) == 0:
print('[interact_annot] No polygons on screen')
return
# Right click - context menu
if event.button == self.RIGHT_BUTTON:
self._show_poly_context_menu(event)
# Left click, indicate that a mouse button is down
if event.button == self.LEFT_BUTTON:
#if event.dblclick and not self.in_edit_parts_mode:
# # On double click enter a single annotation to annotation parts
# #print("DOUBLECLICK")
# #self.edit_poly_parts(self._selected_poly)
if event.key == 'shift':
self._current_rotate_poly = self._selected_poly
else:
# Determine if we are clicking the rotation line
mouse_xy = (event.xdata, event.ydata)
for poly in six.itervalues(self.editable_polys):
if poly.is_near_handle(mouse_xy, self.max_dist):
self._current_rotate_poly = poly
break
if event.dblclick:
# Reset rotation
if self._current_rotate_poly is not None:
self._current_rotate_poly.theta = 0
self._current_rotate_poly.update_display_coords()
polyind, self._ind = self.get_poly_under_cursor(event.x, event.y)
if self._ind is not None and polyind is not None:
self._selected_poly = self.editable_polys[polyind]
if self._selected_poly is None:
return
self.ind_xy = self._selected_poly.xy[self._ind]
self._poly_held = True
self._selected_poly.anchor_idx = self._ind
self.mouseX, self.mouseY = event.xdata, event.ydata
if self._poly_held is True or self._ind is not None:
self._selected_poly.set_alpha(.2)
self._update_poly_colors()
self._update_poly_colors()
self._update_poly_lines()
if self.background is not None:
self.fig.canvas.restore_region(self.background)
else:
print('[interact_annot] error: self.background is none.'
' Trying refresh.')
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
self.fig.canvas.restore_region(self.background)
# Redraw blitted objects
self.draw_artists()
self.fig.canvas.blit(self.ax.bbox)
def on_motion(self, event):
if ut.VERBOSE:
print('[interact_annot] on_motion')
print('[interact_annot] Got key: %r' % event.key)
super(AnnotationInteraction, self).on_motion(event)
# uses boolean punning for terseness
lastX = self.mouseX or None
lastY = self.mouseY or None
# Allow for getting coordinates outside the axes
ax = self.ax
mousePos = [event.x, event.y]
self.mouseX, self.mouseY = ax.transData.inverted().transform(mousePos)
deltaX = lastX is not None and self.mouseX - lastX
deltaY = lastY is not None and self.mouseY - lastY
if not self.showverts:
return
#if self.in_edit_parts_mode:
# return
quick_resize = (self._poly_held is True and (
(event.button == self.MIDDLE_BUTTON) or
(event.button == self.RIGHT_BUTTON) or
(event.button == self.LEFT_BUTTON and event.key == 'ctrl')
))
if self._poly_held is True and self._ind is not None:
# Resize by dragging corner
self._selected_poly.resize_poly(self.mouseX, self.mouseY,
self._ind, self.ax)
self._selected_poly.anchor_idx = self._ind
elif quick_resize:
# Quick resize with special click
anchor_idx = self._selected_poly.anchor_idx
idx = (anchor_idx + 2) % 4 # choose opposite anchor point
self._selected_poly.resize_poly(self.mouseX, self.mouseY, idx,
self.ax)
elif self._current_rotate_poly:
# Rotate using handle
cx, cy = points_center(self._current_rotate_poly.xy)
theta = np.arctan2(cy - self.mouseY, cx - self.mouseX) - TAU / 4
dtheta = theta - self._current_rotate_poly.theta
self._current_rotate_poly.rotate_poly(dtheta, self.ax)
elif self._ind is None and event.button == self.LEFT_BUTTON:
# Translate by dragging inside annot
flag = deltaX is not None and deltaY is not None
if self._poly_held is True and flag:
self._selected_poly.move_poly(deltaX, deltaY, self.ax)
self._ind = None
else:
return
self.update_UI()
def on_click_release(self, event):
super(AnnotationInteraction, self).on_click_release(event)
#if self._poly_held is True:
self._poly_held = False
self._current_rotate_poly = None
if not self.showverts:
return
if self._selected_poly is None:
return
_flag = (
self._ind is None or
self._poly_held is False or
(self._ind is not None and
self.is_down['left'] is True and
self._selected_poly is not None
)
)
if _flag:
self._selected_poly.set_alpha(0)
#self._selected_poly.set_facecolor('white')
self.update_UI()
if self._ind is None:
return
if len(self.editable_polys) == 0:
print('[interact_annot] No polygons on screen')
return
if self._selected_poly is None:
print('[interact_annot] WARNING: Polygon unknown.'
' Using default. (2)')
self._selected_poly = self.get_most_recently_added_poly()
curr_xy = self._selected_poly.xy[self._ind]
if self.ind_xy is not None:
if np.all(np.fabs(self.ind_xy - curr_xy) < 3):
return
self._ind = None
self._poly_held = False
self.draw()
def on_figure_leave(self, event):
if self.debug > 0:
print('[interact_annot] figure leave')
#self.print_status()
#self.on_click_release(event)
self._poly_held = False
self._ind = None
self.reset_mouse_state()
#self.print_status()
def on_key_press(self, event):
if self.debug > 0:
print('[interact_annot] on_key_press')
print('[interact_annot] Got key: %r' % event.key)
if not event.inaxes:
return
if event.key == ACCEPT_SAVE_HOTKEY:
self.save_and_exit(event)
elif event.key == ADD_RECTANGLE_HOTKEY:
self.add_new_poly()
elif event.key == ADD_RECTANGLE_FULL_HOTKEY:
self.add_new_poly(full=True)
elif event.key == DEL_RECTANGLE_HOTKEY:
self.delete_current_poly()
elif event.key == TOGGLE_LABEL_HOTKEY:
self.toggle_species_label()
if re.match('escape', event.key):
self.edit_poly_parts(None)
if re.match('^backspace$', event.key):
self._selected_poly.set_species(DEFAULT_SPECIES_TAG)
if re.match('^tab$', event.key):
self._selected_poly.increment_species(amount=1)
if re.match(r'^ctrl\+tab$', event.key):
self._selected_poly.increment_species(amount=-1)
# NEXT AND PREV COMMANDS
def _matches_hotkey(key, hotkeys):
return any([re.match(hk, key) is not None for hk in
ut.ensure_iterable(hotkeys)])
if _matches_hotkey(event.key, PREV_IMAGE_HOTKEYS):
self.prev_image(event)
if _matches_hotkey(event.key, NEXT_IMAGE_HOTKEYS):
self.next_image(event)
self.draw()
#def poly_changed(self, poly):
# """ this method is called whenever the polygon object is called """
# print('poly_changed poly=%r' % (poly,))
# # only copy the artist props to the line (except visibility)
# #vis = poly.lines.get_visible()
# #vis = poly.handle.get_visible()
# #poly.lines.set_visible(vis)
# #poly.handle.set_visible(vis)
def pretty_hotkey_map(hotkeys):
if hotkeys is None:
return ''
hotkeys = [hotkeys] if not isinstance(hotkeys, list) else hotkeys
mapping = {
#'right': 'right arrow',
#'left': 'left arrow',
}
mapped_hotkeys = [mapping.get(hk, hk) for hk in hotkeys]
hotkey_str = '(' + ut.conj_phrase(mapped_hotkeys, 'or') + ')'
return hotkey_str
def apply_mask(img, mask):
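# Darken everything outside the mask by 100 intensity units, clipped to [0, 255].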
masked_img = img.copy()
masked_img[~mask] = np.uint8(np.clip(masked_img[~mask] - 100., 0, 255))
return masked_img
def points_center(pts):
# the polygons have the first point listed twice in order for them to be
# drawn as closed, but that point shouldn't be counted twice for computing
# the center (hence the [:-1] slice)
return np.array(pts[:-1]).mean(axis=0)
def rotate_points_around(points, theta, ax, ay):
"""
References:
http://www.euclideanspace.com/maths/geometry/affine/aroundPoint/matrix2d/
"""
# TODO: Can use vtool_ibeis for this
sin, cos, array = np.sin, np.cos, np.array
augpts = array([array((x, y, 1)) for (x, y) in points])
ct = cos(theta)
st = sin(theta)
# rotation-about-a-point matrix (see the reference above)
rot_mat = array(
[(ct, -st, ax - ct * ax + st * ay),
(st, ct, ay - st * ax - ct * ay),
( 0, 0, 1)]
)
return [(x, y) for (x, y, z) in rot_mat.dot(augpts.T).T]
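# Illustrative example (ours, not part of the original code): a quarter turn about the
# origin maps (1, 0) to approximately (0, 1):
# rotate_points_around([(1, 0)], TAU / 4, 0, 0) -> [(~0.0, ~1.0)]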
def calc_display_coords(oldcoords, theta):
return rotate_points_around(oldcoords, theta, *points_center(oldcoords))
def polarDelta(p1, p2):
mag = vt.L2(p1, p2)
theta = np.arctan2(p2[1] - p1[1], p2[0] - p1[0])
return [mag, theta]
def apply_polarDelta(poldelt, cart):
newx = cart[0] + (poldelt[0] * np.cos(poldelt[1]))
newy = cart[1] + (poldelt[0] * np.sin(poldelt[1]))
return (newx, newy)
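# Note (ours): polarDelta and apply_polarDelta are inverses in the sense that
# apply_polarDelta(polarDelta(p1, p2), p1) recovers p2 up to floating point error.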
def is_within_distance_from_line(pt, line, max_dist):
pt = np.array(pt)
line = np.array(line)
return vt.distance_to_lineseg(pt, line[0], line[1]) <= max_dist
def check_min_wh(coords):
"""
Depends on hardcoded indices, which is inelegant, but
we're already depending on those for the corner-index fudge
factor in resize_poly above
0----1
| |
3----2
"""
MIN_W = 5
MIN_H = 5
# the separate 1 and 2 variables are not strictly necessary, but
# provide a sanity check to ensure that we're dealing with the
# right shape
#w, h = vt.get_pointset_extent_wh(np.array(coords))
w1 = coords[1][0] - coords[0][0]
w2 = coords[2][0] - coords[3][0]
h1 = coords[3][1] - coords[0][1]
h2 = coords[2][1] - coords[1][1]
assert np.isclose(w1, w2), ('w1: %r, w2: %r' % (w1, w2))
assert np.isclose(h1, h2), ('h1: %r, h2: %r' % (h1, h2))
w, h = w1, h1
#print('w, h = (%r, %r)' % (w1, h1))
return (MIN_W < w) and (MIN_H < h)
def default_vertices(img, polys=None, mouseX=None, mouseY=None):
"""Default to rectangle that has a quarter-width/height border."""
(h, w) = img.shape[0:2]
# Center the new verts around wherever the mouse is
if mouseX is not None and mouseY is not None:
center_x = mouseX
center_h = mouseY
else:
center_x = w // 2
center_h = h // 2
if polys is not None and len(polys) > 0:
# Use the largest polygon size as the default verts
wh_list = np.array([vt.bbox_from_verts(poly.xy)[2:4]
for poly in six.itervalues(polys)])
w_, h_ = wh_list.max(axis=0) // 2
else:
# If no poly exists use 1/4 of the image size
w_, h_ = (w // 4, h // 4)
# Get the x/y extents by offsetting the centers
x1, x2 = np.array([center_x, center_x]) + (w_ * np.array([-1, 1]))
y1, y2 = np.array([center_h, center_h]) + (h_ * np.array([-1, 1]))
# Clip to bounds
x1 = max(x1, 1)
y1 = max(y1, 1)
x2 = min(x2, w - 1)
y2 = min(y2, h - 1)
return ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
def check_valid_coords(ax, coords_list):
return all([check_dims(ax, xy_pt) for xy_pt in coords_list])
def check_dims(ax, xy_pt, margin=0.5):
"""
checks if bounding box dims are ok
Allow the bounding box to go off the image
so orientations can be done correctly
"""
num_out = 0
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if xy_pt[0] < xlim[0] + margin:
num_out += 1
if xy_pt[0] > xlim[1] - margin:
num_out += 1
if xy_pt[1] < ylim[1] + margin:
num_out += 1
if xy_pt[1] > ylim[0] - margin:
num_out += 1
return num_out <= 3
def enforce_dims(ax, xy_pt, margin=0.5):
"""
ONLY USE THIS ON UNROTATED RECTANGLES, as to do otherwise may yield
arbitrary polygons
"""
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if xy_pt[0] < xlim[0] + margin:
xy_pt[0] = xlim[0] + margin
if xy_pt[0] > xlim[1] - margin:
xy_pt[0] = xlim[1] - margin
if xy_pt[1] < ylim[1] + margin:
xy_pt[1] = ylim[1] + margin
if xy_pt[1] > ylim[0] - margin:
xy_pt[1] = ylim[0] - margin
return True
def test_interact_annots():
r"""
CommandLine:
python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
Example:
>>> # ENABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> import plottool_ibeis as pt
>>> # build test data
>>> # execute function
>>> self = test_interact_annots()
>>> # verify results
>>> print(self)
>>> pt.show_if_requested()
"""
print('[interact_annot] *** START DEMO ***')
verts_list = [
((0, 400), (400, 400), (400, 0), (0, 0), (0, 400)),
((400, 700), (700, 700), (700, 400), (400, 400), (400, 700))
]
#if img is None:
try:
img_url = 'http://i.imgur.com/Vq9CLok.jpg'
img_fpath = ut.grab_file_url(img_url)
img = vt.imread(img_fpath)
except Exception as ex:
print('[interact_annot] cant read zebra: %r' % ex)
img = np.random.uniform(0, 255, size=(100, 100))
valid_species = ['species1', 'species2']
metadata_list = [{'name': 'foo'}, None]
self = AnnotationInteraction(img, verts_list=verts_list,
valid_species=valid_species,
metadata_list=metadata_list,
fnum=0) # NOQA
return self
if __name__ == '__main__':
"""
CommandLine:
python -m plottool_ibeis.interact_annotations --exec-test_interact_annots --show
CommandLine:
python -m plottool_ibeis.interact_annotations
python -m plottool_ibeis.interact_annotations --allexamples
python -m plottool_ibeis.interact_annotations --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| apache-2.0 |
CSC591ADBI-TeamProjects/Bitcoin-Price-Prediction | bitcoin.py | 1 | 5370 | import statsmodels.formula.api as smf
import sklearn.metrics as sm
import pandas as pd
import numpy as np
import math
import sys
# The path to the data folder should be given as input
if len(sys.argv) != 2:
print('bitcoin.py <path to data folder>')
sys.exit(1)
data_path = sys.argv[1]
# Reading the vectors from the given csv files
train1_90 = pd.read_csv(data_path+'/train1_90.csv')
train1_180 = pd.read_csv(data_path+'/train1_180.csv')
train1_360 = pd.read_csv(data_path+'/train1_360.csv')
train2_90 = pd.read_csv(data_path+'/train2_90.csv')
train2_180 = pd.read_csv(data_path+'/train2_180.csv')
train2_360 = pd.read_csv(data_path+'/train2_360.csv')
test_90 = pd.read_csv(data_path+'/test_90.csv')
test_180 = pd.read_csv(data_path+'/test_180.csv')
test_360 = pd.read_csv(data_path+'/test_360.csv')
def computeDelta(wt, X, Xi):
"""
This function computes equation 6 of the paper, but with the euclidean distance
replaced by the similarity function given in Equation 9.
Parameters
----------
wt : int
This is the constant c at the top of the right column on page 4.
X : A row of Panda Dataframe
Corresponds to (x, y) in Equation 6.
Xi : Panda Dataframe
Corresponds to a dataframe of (xi, yi) in Equation 6.
Returns
-------
float
The output of equation 6, a prediction of the average price change.
"""
# YOUR CODE GOES HERE
num = 0.0
den = 0.0
for i in xrange(0,len(Xi)):
Yi = Xi.iloc[i][-1]
xi = Xi.iloc[i][0:-1]
s_X_xi = similarity(X[0:-1],xi)
    # note: the following 2 lines use the wt argument (the constant c in the paper), not the module-level "weight"
num += float(Yi*math.exp(wt*s_X_xi))
den += float(math.exp(wt*s_X_xi))
return float(num)/den
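# Hedged sanity check (an addition, not part of the original assignment):
# computeDelta is a kernel-weighted average of the Yi column, with weights
# exp(wt * similarity(X, xi)); with wt = 0 every row gets equal weight, so the
# result reduces to the plain mean of Yi.
def _compute_delta_sanity_check():
    demo_Xi = pd.DataFrame([[0.0, 1.0, -1.0],
                            [1.0, 0.0, 0.0],
                            [2.0, 3.0, 1.0]],
                           columns=['x1', 'x2', 'Yi'])
    demo_X = demo_Xi.iloc[0]
    return computeDelta(0, demo_X, demo_Xi)  # expected: Yi.mean() == 0.0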
def similarity(a,b):
#is similarity not simply:
#sim = ((a-a.mean())*(b-b.mean())).sum()/float(len(a)*a.std()*b.std())
#std_a = std(a)
#std_b = std(b)
#mu_a = float(sum(a))/len(a)
#mu_b = float(sum(b))/len(b)
std_a = np.std(a)
std_b = np.std(b)
mu_a = np.mean(a)
mu_b = np.mean(b)
M = len(b)
sumab = 0
for z in xrange(0, M):
sumab += (a[z] - mu_a) * (b[z] - mu_b)
return float(sumab) / (M*std_a*std_b)
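# Hedged alternative (an addition, not used by the script): the loop above is
# equivalent to the vectorised one-liner suggested in the comment at the top
# of similarity().
def _similarity_vectorized(a, b):
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return ((a - a.mean()) * (b - b.mean())).sum() / (len(a) * a.std() * b.std())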
# note: the commented-out std() below actually computes the variance (the sqrt is missing)
#def std(a):
# suma = 0
# mu = float(sum(a))/len(a)
# for ai in a:
# suma += (ai - mu)**2
# return float(suma)/len(a)
# Perform the Bayesian Regression to predict the average price change for each dataset of train2 using train1 as input.
# These will be used to estimate the coefficients (w0, w1, w2, and w3) in equation 8.
weight = 2 # This constant was not specified in the paper, but we will use 2.
trainDeltaP90 = np.empty(0)
trainDeltaP180 = np.empty(0)
trainDeltaP360 = np.empty(0)
for i in xrange(0,len(train1_90.index)) :
trainDeltaP90 = np.append(trainDeltaP90, computeDelta(weight,train2_90.iloc[i],train1_90))
for i in xrange(0,len(train1_180.index)) :
trainDeltaP180 = np.append(trainDeltaP180, computeDelta(weight,train2_180.iloc[i],train1_180))
for i in xrange(0,len(train1_360.index)) :
trainDeltaP360 = np.append(trainDeltaP360, computeDelta(weight,train2_360.iloc[i],train1_360))
# Actual deltaP values for the train2 data.
trainDeltaP = np.asarray(train2_360[['Yi']])
trainDeltaP = np.reshape(trainDeltaP, -1)
# Combine all the training data
d = {'deltaP': trainDeltaP,
'deltaP90': trainDeltaP90,
'deltaP180': trainDeltaP180,
'deltaP360': trainDeltaP360 }
trainData = pd.DataFrame(d)
# Feed the data: [deltaP, deltaP90, deltaP180, deltaP360] to train the linear model.
# Use the statsmodels ols function.
# Use the variable name model for your fitted model
# YOUR CODE HERE
model = smf.ols(formula = 'deltaP ~ deltaP90 + deltaP180 + deltaP360', data = trainData).fit()
# Print the weights from the model
print model.params
# Perform the Bayesian Regression to predict the average price change for each dataset of test using train1 as input.
# This should be similar to above where it was computed for train2.
# YOUR CODE HERE
testDeltaP90 = np.empty(0)
testDeltaP180 = np.empty(0)
testDeltaP360 = np.empty(0)
for i in xrange(0,len(train1_90.index)) :
testDeltaP90 = np.append(testDeltaP90, computeDelta(weight,test_90.iloc[i],train1_90))
for i in xrange(0,len(train1_180.index)) :
testDeltaP180 = np.append(testDeltaP180, computeDelta(weight,test_180.iloc[i],train1_180))
for i in xrange(0,len(train1_360.index)) :
testDeltaP360 = np.append(testDeltaP360, computeDelta(weight,test_360.iloc[i],train1_360))
# Actual deltaP values for test data.
# YOUR CODE HERE (use the right variable names so the below code works)
testDeltaP = np.asarray(test_360[['Yi']])
testDeltaP = np.reshape(testDeltaP, -1)
# Combine all the test data
d = {'deltaP': testDeltaP,
'deltaP90': testDeltaP90,
'deltaP180': testDeltaP180,
'deltaP360': testDeltaP360}
testData = pd.DataFrame(d)
# Predict price variation on the test data set.
result = model.predict(testData)
compare = { 'Actual': testDeltaP,
'Predicted': result }
compareDF = pd.DataFrame(compare)
# Compute the MSE and print the result
# HINT: consider using the sm.mean_squared_error function
MSE = 0.0
# YOUR CODE HERE
print "The MSE is %f" % (sm.mean_squared_error(compareDF['Actual'], compareDF['Predicted']))
| mit |
spatialaudio/sweep | lin_sweep_kaiser_window_script3/merge_scripts.py | 2 | 1658 | #!/usr/bin/env python3
""" Script to merge scripts"""
import numpy as np
import matplotlib.pyplot as plt
script3 = np.genfromtxt('lin_sweep_kaiser_window_script3.txt')
script3_1 = np.genfromtxt('lin_sweep_kaiser_window_script3_1.txt')
fade_in_list = script3[:, 0]
# Script3
pnr_list = script3[:, 1]
spectrum_distance_list = script3[:, 2]
# Script3_1 (unwindowed deconvolution)
pnr_unwindowed_deconvolution_list = script3_1[:, 1]
spectrum_distance_unwindowed_deconvolution_list = script3_1[:, 2]
plt.plot(fade_in_list, pnr_list, label='Deconvolution: Excitation windowed')
plt.plot(
fade_in_list,
pnr_unwindowed_deconvolution_list,
label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Peak to noise ratio depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('Peak to noise ratio / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='center right')
plt.xlim([-10, 1000])
plt.savefig('pnr.png')
plt.close()
NFFT_bandstop = 88201
max_measurement = 7.09207671865
plt.plot(fade_in_list, -10 * np.log10(1 / NFFT_bandstop *
np.asarray(spectrum_distance_list) / max_measurement), label='Deconvolution: Excitation windowed')
plt.plot(fade_in_list,
-10 * np.log10(1 / NFFT_bandstop * np.asarray(spectrum_distance_unwindowed_deconvolution_list) /
max_measurement), label='Deconvolution: Excitation unwindowed')
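# Hedged helper (an addition, not used above): both curves apply the same
# normalisation, -10*log10((distance / NFFT_bandstop) / max_measurement).
def _normalised_distance_db(spectrum_distance):
    return -10 * np.log10(np.asarray(spectrum_distance) /
                          (NFFT_bandstop * max_measurement))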
plt.grid()
plt.title('Spectrum Distance depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('(Spectrum Distance / max(Spectrum Distance)) / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='lower left')
plt.savefig('spectral_distance.png')
| mit |
theoryno3/pylearn2 | pylearn2/scripts/browse_conv_weights.py | 44 | 7605 | #! /usr/bin/env python
"""
Interactive viewer for the convolutional weights in a pickled model.
Unlike ./show_weights, this shows one unit's weights at a time. This
allows it to display weights from higher levels (which can have 100s
of input channels), not just the first.
"""
import os
import sys
import warnings
import argparse
import numpy
from pylearn2.models.mlp import MLP, ConvElemwise, CompositeLayer
from pylearn2.models.maxout import MaxoutConvC01B
from pylearn2.utils import safe_zip, serial
from pylearn2.space import Conv2DSpace
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
def _parse_args():
parser = argparse.ArgumentParser(
description=("Interactive browser of convolutional weights. "
"Up/down keys switch layers. "
"Left/right keys switch units."))
parser.add_argument('-i',
'--input',
required=True,
help=".pkl file of model")
result = parser.parse_args()
if os.path.splitext(result.input)[1] != '.pkl':
print("Expected --input to end in .pkl, got %s." % result.input)
sys.exit(1)
return result
def _get_conv_layers(layer, result=None):
'''
Returns a list of the convolutional layers in a model.
Returns
-------
rval: list
Lists the convolutional layers (ConvElemwise, MaxoutConvC01B).
'''
if result is None:
result = []
if isinstance(layer, (MLP, CompositeLayer)):
for sub_layer in layer.layers:
_get_conv_layers(sub_layer, result)
elif isinstance(layer, (MaxoutConvC01B, ConvElemwise)):
result.append(layer)
return result
def _get_conv_weights_bc01(layer):
'''
Returns a conv. layer's weights in BC01 format.
Parameters
----------
layer: MaxoutConvC01B or ConvElemwise
Returns
-------
rval: numpy.ndarray
The kernel weights in BC01 axis order. (B: output channels, C: input
channels)
'''
assert isinstance(layer, (MaxoutConvC01B, ConvElemwise))
weights = layer.get_params()[0].get_value()
if isinstance(layer, MaxoutConvC01B):
c01b = Conv2DSpace(shape=weights.shape[1:3],
num_channels=weights.shape[0],
axes=('c', 0, 1, 'b'))
bc01 = Conv2DSpace(shape=c01b.shape,
num_channels=c01b.num_channels,
axes=('b', 'c', 0, 1))
weights = c01b.np_format_as(weights, bc01)
elif isinstance(layer, ConvElemwise):
weights = weights[:, :, ::-1, ::-1] # reverse 0, 1 axes
return weights
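# Hedged sketch (an addition, not part of pylearn2): for a plain ndarray, the
# C01B -> BC01 conversion done above via Conv2DSpace.np_format_as amounts to an
# axis transpose.
def _c01b_to_bc01_transpose(weights_c01b):
    '''('c', 0, 1, 'b') -> ('b', 'c', 0, 1)'''
    return weights_c01b.transpose(3, 0, 1, 2)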
def _num_conv_units(conv_layer):
'''
Returns a conv layer's number of output channels.
'''
assert isinstance(conv_layer, (MaxoutConvC01B, ConvElemwise))
weights = conv_layer.get_params()[0].get_value()
if isinstance(conv_layer, MaxoutConvC01B):
return weights.shape[-1]
elif isinstance(conv_layer, ConvElemwise):
return weights.shape[0]
def main():
"Entry point of script."
args = _parse_args()
model = serial.load(args.input)
if not isinstance(model, MLP):
print("Expected the .pkl file to contain an MLP, got a %s." %
str(model.type))
sys.exit(1)
def get_figure_and_axes(conv_layers, window_width=800):
kernel_display_width = 20
margin = 5
grid_square_width = kernel_display_width + margin
num_columns = window_width // grid_square_width
max_num_channels = numpy.max([layer.get_input_space().num_channels
for layer in conv_layers])
# pdb.set_trace()
num_rows = max_num_channels // num_columns
if num_rows * num_columns < max_num_channels:
num_rows += 1
assert num_rows * num_columns >= max_num_channels
window_width = 15
        # '* 1.8' comes from the fact that rows take up about 1.8 times as much
# space as columns, due to the title text.
window_height = window_width * ((num_rows * 1.8) / num_columns)
figure, all_axes = pyplot.subplots(num_rows,
num_columns,
squeeze=False,
figsize=(window_width,
window_height))
for unit_index, axes in enumerate(all_axes.flat):
subplot_title = axes.set_title('%d' % unit_index)
subplot_title.set_size(8)
subplot_title.set_color((.3, .3, .3))
# Hides tickmarks
for axes_row in all_axes:
for axes in axes_row:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
return figure, all_axes
conv_layers = _get_conv_layers(model)
figure, all_axes = get_figure_and_axes(conv_layers)
title_text = figure.suptitle("title")
pyplot.tight_layout(h_pad=.1, w_pad=.5) # in inches
layer_index = numpy.array(0)
unit_indices = numpy.zeros(len(model.layers), dtype=int)
def redraw():
'''
Draws the currently selected convolutional kernel.
'''
axes_list = all_axes.flatten()
layer = conv_layers[layer_index]
unit_index = unit_indices[layer_index, ...]
weights = _get_conv_weights_bc01(layer)[unit_index, ...]
active_axes = axes_list[:weights.shape[0]]
for axes, weights in safe_zip(active_axes, weights):
axes.set_visible(True)
axes.imshow(weights, cmap='gray', interpolation='nearest')
assert len(frozenset(active_axes)) == len(active_axes)
unused_axes = axes_list[len(active_axes):]
assert len(frozenset(unused_axes)) == len(unused_axes)
assert len(axes_list) == len(active_axes) + len(unused_axes)
for axes in unused_axes:
axes.set_visible(False)
title_text.set_text("Layer %s, unit %d" %
(layer.layer_name,
unit_indices[layer_index]))
figure.canvas.draw()
def on_key_press(event):
"Callback for key press events"
def increment(index, size, step):
"""
Increments an index in-place.
Parameters
----------
index: numpy.ndarray
scalar (0-dim array) of dtype=int. Non-negative.
size: int
One more than the maximum permissible index.
step: int
-1, 0, or 1.
"""
assert index >= 0
assert step in (0, -1, 1)
index[...] = (index + size + step) % size
if event.key in ('up', 'down'):
increment(layer_index,
len(conv_layers),
1 if event.key == 'up' else -1)
unit_index = unit_indices[layer_index]
redraw()
elif event.key in ('right', 'left'):
unit_index = unit_indices[layer_index:layer_index + 1]
increment(unit_index,
_num_conv_units(conv_layers[layer_index]),
1 if event.key == 'right' else -1)
redraw()
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw()
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
henryroe/ztv | ztv/stats_panel.py | 1 | 24355 | from __future__ import absolute_import
import wx
from wx.lib.pubsub import pub
from matplotlib.patches import Rectangle
from matplotlib import cm
import numpy as np
from astropy.stats import sigma_clipped_stats
import sys
from .ztv_wx_lib import set_textctrl_background_color, validate_textctrl_str, textctrl_output_only_background_color
from .ztv_lib import send_to_stream
class StatsPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.ztv_frame = self.GetTopLevelParent()
self.ztv_frame.primary_image_panel.popup_menu_cursor_modes.append('Stats box')
self.ztv_frame.primary_image_panel.available_cursor_modes['Stats box'] = {
'set-to-mode':self.set_cursor_to_stats_box_mode,
'on_button_press':self.on_button_press,
'on_motion':self.on_motion,
'on_button_release':self.on_button_release}
self.textentry_font = wx.Font(14, wx.FONTFAMILY_MODERN, wx.NORMAL, wx.FONTWEIGHT_LIGHT, False)
self.stats_info = None
self.last_string_values = {'x0':'', 'xsize':'', 'x1':'', 'y0':'', 'ysize':'', 'y1':''}
self.stats_rect = Rectangle((0, 0), 10, 10, color='magenta', fill=False, zorder=100)
# use self.stats_rect as where we store/retrieve the x0,y0,x1,y1
# x0,y0,x1,y1 should be limited to range of 0 to shape-1
# but, stats should be calculated over e.g. x0:x1+1 (so that have pixels to do stats on even if x0==x1)
# and, width/height of stats_rect should always be >= 0
values_sizer = wx.FlexGridSizer( 10, 5, 0, 0 )
values_sizer.SetFlexibleDirection( wx.BOTH )
values_sizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.low_static_text = wx.StaticText( self, wx.ID_ANY, u"Low", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_RIGHT )
self.low_static_text.Wrap( -1 )
values_sizer.Add(self.low_static_text, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 0)
self.low_static_text = wx.StaticText( self, wx.ID_ANY, u"# pix", wx.DefaultPosition, wx.DefaultSize, 0 )
self.low_static_text.Wrap( -1 )
values_sizer.Add(self.low_static_text, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 0)
self.high_static_text = wx.StaticText( self, wx.ID_ANY, u"High", wx.DefaultPosition, wx.DefaultSize, 0 )
self.high_static_text.Wrap( -1 )
values_sizer.Add(self.high_static_text, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 0)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.x_static_text = wx.StaticText( self, wx.ID_ANY, u"x", wx.DefaultPosition, wx.DefaultSize, 0 )
self.x_static_text.Wrap( -1 )
values_sizer.Add(self.x_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 0)
self.x0_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.x0_textctrl.SetFont(self.textentry_font)
values_sizer.Add(self.x0_textctrl, 0, wx.ALL, 2)
self.x0_textctrl.Bind(wx.EVT_TEXT, self.x0_textctrl_changed)
self.x0_textctrl.Bind(wx.EVT_TEXT_ENTER, self.x0_textctrl_entered)
self.xsize_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.xsize_textctrl.SetFont(self.textentry_font)
values_sizer.Add(self.xsize_textctrl, 0, wx.ALL, 2)
self.xsize_textctrl.Bind(wx.EVT_TEXT, self.xsize_textctrl_changed)
self.xsize_textctrl.Bind(wx.EVT_TEXT_ENTER, self.xsize_textctrl_entered)
self.x1_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.x1_textctrl.SetFont(self.textentry_font)
values_sizer.Add(self.x1_textctrl, 0, wx.ALL, 2)
self.x1_textctrl.Bind(wx.EVT_TEXT, self.x1_textctrl_changed)
self.x1_textctrl.Bind(wx.EVT_TEXT_ENTER, self.x1_textctrl_entered)
self.npix_static_text = wx.StaticText( self, wx.ID_ANY, u"# pixels", wx.DefaultPosition, wx.DefaultSize, 0 )
self.npix_static_text.Wrap( -1 )
values_sizer.Add(self.npix_static_text, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_BOTTOM, 0)
self.y_static_text = wx.StaticText( self, wx.ID_ANY, u"y", wx.DefaultPosition, wx.DefaultSize, 0 )
self.y_static_text.Wrap( -1 )
values_sizer.Add(self.y_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 0)
self.y0_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.y0_textctrl.SetFont(self.textentry_font)
values_sizer.Add(self.y0_textctrl, 0, wx.ALL, 2)
self.y0_textctrl.Bind(wx.EVT_TEXT, self.y0_textctrl_changed)
self.y0_textctrl.Bind(wx.EVT_TEXT_ENTER, self.y0_textctrl_entered)
self.ysize_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.ysize_textctrl.SetFont(self.textentry_font)
values_sizer.Add(self.ysize_textctrl, 0, wx.ALL, 2)
self.ysize_textctrl.Bind(wx.EVT_TEXT, self.ysize_textctrl_changed)
self.ysize_textctrl.Bind(wx.EVT_TEXT_ENTER, self.ysize_textctrl_entered)
self.y1_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_PROCESS_ENTER)
self.y1_textctrl.SetFont(self.textentry_font)
values_sizer.Add(self.y1_textctrl, 0, wx.ALL, 2)
self.y1_textctrl.Bind(wx.EVT_TEXT, self.y1_textctrl_changed)
self.y1_textctrl.Bind(wx.EVT_TEXT_ENTER, self.y1_textctrl_entered)
self.npix_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.npix_textctrl.SetFont(self.textentry_font)
self.npix_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.npix_textctrl, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT, 0)
values_sizer.AddSpacer((0,15), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.median_static_text = wx.StaticText( self, wx.ID_ANY, u"Median", wx.DefaultPosition, wx.DefaultSize, 0 )
self.median_static_text.Wrap( -1 )
values_sizer.Add(self.median_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT, 0)
self.median_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.median_textctrl.SetFont(self.textentry_font)
self.median_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.median_textctrl, 0, wx.ALL, 2)
self.robust_static_text = wx.StaticText( self, wx.ID_ANY, u"Robust", wx.DefaultPosition, wx.DefaultSize, 0 )
self.robust_static_text.Wrap( -1 )
values_sizer.Add(self.robust_static_text, 0, wx.ALL|wx.ALIGN_BOTTOM|wx.ALIGN_CENTER_HORIZONTAL, 0)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.mean_static_text = wx.StaticText( self, wx.ID_ANY, u"Mean", wx.DefaultPosition, wx.DefaultSize, 0 )
self.mean_static_text.Wrap( -1 )
values_sizer.Add(self.mean_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT, 0)
self.mean_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.mean_textctrl.SetFont(self.textentry_font)
self.mean_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.mean_textctrl, 0, wx.ALL, 2)
self.robust_mean_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.robust_mean_textctrl.SetFont(self.textentry_font)
self.robust_mean_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.robust_mean_textctrl, 0, wx.ALL, 2)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.stdev_static_text = wx.StaticText( self, wx.ID_ANY, u"Stdev", wx.DefaultPosition, wx.DefaultSize, 0 )
self.stdev_static_text.Wrap( -1 )
values_sizer.Add(self.stdev_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT, 0)
self.stdev_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.stdev_textctrl.SetFont(self.textentry_font)
self.stdev_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.stdev_textctrl, 0, wx.ALL, 2)
self.robust_stdev_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.robust_stdev_textctrl.SetFont(self.textentry_font)
self.robust_stdev_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.robust_stdev_textctrl, 0, wx.ALL, 2)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,15), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.min_static_text = wx.StaticText( self, wx.ID_ANY, u"Min", wx.DefaultPosition, wx.DefaultSize, 0 )
self.min_static_text.Wrap( -1 )
values_sizer.Add(self.min_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT, 0)
self.minval_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.minval_textctrl.SetFont(self.textentry_font)
self.minval_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.minval_textctrl, 0, wx.ALL, 2)
self.minpos_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.minpos_textctrl.SetFont(self.textentry_font)
self.minpos_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.minpos_textctrl, 0, wx.ALL, 2)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
values_sizer.AddSpacer((0,0), 0, wx.EXPAND)
self.max_static_text = wx.StaticText( self, wx.ID_ANY, u"Max", wx.DefaultPosition, wx.DefaultSize, 0 )
self.max_static_text.Wrap( -1 )
values_sizer.Add(self.max_static_text, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT, 0)
self.maxval_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.maxval_textctrl.SetFont(self.textentry_font)
self.maxval_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.maxval_textctrl, 0, wx.ALL, 2)
self.maxpos_textctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
wx.TE_READONLY)
self.maxpos_textctrl.SetFont(self.textentry_font)
self.maxpos_textctrl.SetBackgroundColour(textctrl_output_only_background_color)
values_sizer.Add(self.maxpos_textctrl, 0, wx.ALL, 2)
self.hideshow_button = wx.Button(self, wx.ID_ANY, u"Show", wx.DefaultPosition, wx.DefaultSize, 0)
values_sizer.Add(self.hideshow_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 2)
self.hideshow_button.Bind(wx.EVT_BUTTON, self.on_hideshow_button)
v_sizer1 = wx.BoxSizer(wx.VERTICAL)
v_sizer1.AddStretchSpacer(1.0)
v_sizer1.Add(values_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL)
v_sizer1.AddStretchSpacer(1.0)
self.SetSizer(v_sizer1)
pub.subscribe(self.queue_update_stats, 'recalc-display-image-called')
pub.subscribe(self._set_stats_box_parameters, 'set-stats-box-parameters')
pub.subscribe(self.publish_stats_to_stream, 'get-stats-box-info')
def publish_stats_to_stream(self, msg=None):
wx.CallAfter(send_to_stream, sys.stdout, ('stats-box-info', self.stats_info))
def on_button_press(self, event):
self.select_panel()
self.update_stats_box(event.xdata, event.ydata, event.xdata, event.ydata)
self.redraw_overplot_on_image()
self.cursor_stats_box_x0, self.cursor_stats_box_y0 = event.xdata, event.ydata
def on_motion(self, event):
if event.button is not None:
self.update_stats_box(self.cursor_stats_box_x0, self.cursor_stats_box_y0, event.xdata, event.ydata)
self.redraw_overplot_on_image()
self.update_stats()
def on_button_release(self, event):
self.redraw_overplot_on_image()
self.update_stats()
def set_cursor_to_stats_box_mode(self, event):
self.ztv_frame.primary_image_panel.cursor_mode = 'Stats box'
self.ztv_frame.stats_panel.select_panel()
self.ztv_frame.stats_panel.highlight_panel()
def queue_update_stats(self, msg=None):
"""
wrapper to call update_stats from CallAfter in order to make GUI as responsive as possible.
"""
wx.CallAfter(self.update_stats, msg=None)
def _set_stats_box_parameters(self, msg):
"""
wrapper to update_stats_box to receive messages & translate them correctly
"""
x0,x1,y0,y1 = [None]*4
if msg['xrange'] is not None:
x0,x1 = msg['xrange']
if msg['yrange'] is not None:
y0,y1 = msg['yrange']
if msg['xrange'] is not None or msg['yrange'] is not None:
self.update_stats_box(x0, y0, x1, y1)
if msg['show_overplot'] is not None:
if msg['show_overplot']:
self.redraw_overplot_on_image()
else:
self.remove_overplot_on_image()
send_to_stream(sys.stdout, ('set-stats-box-parameters-done', True))
def update_stats_box(self, x0=None, y0=None, x1=None, y1=None):
if x0 is None:
x0 = self.stats_rect.get_x()
if y0 is None:
y0 = self.stats_rect.get_y()
if x1 is None:
x1 = self.stats_rect.get_x() + self.stats_rect.get_width()
if y1 is None:
y1 = self.stats_rect.get_y() + self.stats_rect.get_height()
if x0 > x1:
x0, x1 = x1, x0
if y0 > y1:
y0, y1 = y1, y0
x0 = min(max(0, x0), self.ztv_frame.display_image.shape[1] - 1)
y0 = min(max(0, y0), self.ztv_frame.display_image.shape[0] - 1)
x1 = min(max(0, x1), self.ztv_frame.display_image.shape[1] - 1)
y1 = min(max(0, y1), self.ztv_frame.display_image.shape[0] - 1)
self.stats_rect.set_bounds(x0, y0, x1 - x0, y1 - y0)
if self.hideshow_button.GetLabel() == 'Hide':
self.ztv_frame.primary_image_panel.figure.canvas.draw()
self.update_stats()
def remove_overplot_on_image(self):
self.ztv_frame.primary_image_panel.remove_patch('stats_panel:stats_rect')
self.hideshow_button.SetLabel(u"Show")
def redraw_overplot_on_image(self):
self.ztv_frame.primary_image_panel.add_patch('stats_panel:stats_rect', self.stats_rect)
self.hideshow_button.SetLabel(u"Hide")
def on_hideshow_button(self, evt):
if self.hideshow_button.GetLabel() == 'Hide':
self.remove_overplot_on_image()
else:
self.redraw_overplot_on_image()
def get_x0y0x1y1_from_stats_rect(self):
x0 = self.stats_rect.get_x()
y0 = self.stats_rect.get_y()
x1 = x0 + self.stats_rect.get_width()
y1 = y0 + self.stats_rect.get_height()
return x0,y0,x1,y1
def update_stats(self, msg=None):
x0,y0,x1,y1 = self.get_x0y0x1y1_from_stats_rect()
x0, y0 = int(np.round(x0)), int(np.round(y0))
x1, y1 = int(np.round(x1)), int(np.round(y1))
self.last_string_values['x0'] = str(int(x0))
self.x0_textctrl.SetValue(self.last_string_values['x0'])
self.last_string_values['y0'] = str(int(y0))
self.y0_textctrl.SetValue(self.last_string_values['y0'])
x_npix = int(x1 - x0 + 1)
self.last_string_values['xsize'] = str(x_npix)
self.xsize_textctrl.SetValue(self.last_string_values['xsize'])
y_npix = int(y1 - y0 + 1)
self.last_string_values['ysize'] = str(y_npix)
self.ysize_textctrl.SetValue(self.last_string_values['ysize'])
self.last_string_values['x1'] = str(int(x1))
self.x1_textctrl.SetValue(self.last_string_values['x1'])
self.last_string_values['y1'] = str(int(y1))
self.y1_textctrl.SetValue(self.last_string_values['y1'])
self.npix_textctrl.SetValue(str(x_npix * y_npix))
stats_data = self.ztv_frame.display_image[y0:y1+1, x0:x1+1]
finite_mask = np.isfinite(stats_data)
if finite_mask.max() is np.True_:
stats_data_mean = stats_data[finite_mask].mean()
stats_data_median = np.median(stats_data[finite_mask])
stats_data_std = stats_data[finite_mask].std()
robust_mean, robust_median, robust_std = sigma_clipped_stats(stats_data[finite_mask])
else:
stats_data_mean = np.nan
stats_data_median = np.nan
stats_data_std = np.inf
robust_mean, robust_median, robust_std = np.nan, np.nan, np.inf
self.stats_info = {'xrange':[x0,x1], 'yrange':[y0,y1],
'mean':stats_data_mean, 'median':stats_data_median, 'std':stats_data_std,
'min':stats_data.min(), 'max':stats_data.max()} # want min/max to reflect any Inf/NaN
self.mean_textctrl.SetValue("{:0.4g}".format(self.stats_info['mean']))
self.median_textctrl.SetValue("{:0.4g}".format(self.stats_info['median']))
self.stdev_textctrl.SetValue("{:0.4g}".format(self.stats_info['std']))
self.stats_info['robust-mean'] = robust_mean
self.stats_info['robust-median'] = robust_median
self.stats_info['robust-std'] = robust_std
self.robust_mean_textctrl.SetValue("{:0.4g}".format(robust_mean))
self.robust_stdev_textctrl.SetValue("{:0.4g}".format(robust_std))
self.minval_textctrl.SetValue("{:0.4g}".format(self.stats_info['min']))
self.maxval_textctrl.SetValue("{:0.4g}".format(self.stats_info['max']))
wmin = np.where(stats_data == stats_data.min())
wmin = [(wmin[1][i] + x0,wmin[0][i] + y0) for i in np.arange(wmin[0].size)]
if len(wmin) == 1:
wmin = wmin[0]
self.minpos_textctrl.SetValue("{}".format(wmin))
self.stats_info['wmin'] = wmin
wmax = np.where(stats_data == stats_data.max())
wmax = [(wmax[1][i] + x0,wmax[0][i] + y0) for i in np.arange(wmax[0].size)]
if len(wmax) == 1:
wmax = wmax[0]
self.maxpos_textctrl.SetValue("{}".format(wmax))
self.stats_info['wmax'] = wmax
set_textctrl_background_color(self.x0_textctrl, 'ok')
set_textctrl_background_color(self.x1_textctrl, 'ok')
set_textctrl_background_color(self.xsize_textctrl, 'ok')
set_textctrl_background_color(self.y0_textctrl, 'ok')
set_textctrl_background_color(self.y1_textctrl, 'ok')
set_textctrl_background_color(self.ysize_textctrl, 'ok')
def x0_textctrl_changed(self, evt):
validate_textctrl_str(self.x0_textctrl, int, self.last_string_values['x0'])
def x0_textctrl_entered(self, evt):
if validate_textctrl_str(self.x0_textctrl, int, self.last_string_values['x0']):
self.last_string_values['x0'] = self.x0_textctrl.GetValue()
self.update_stats_box(int(self.last_string_values['x0']), None, None, None)
self.x0_textctrl.SetSelection(-1, -1)
self.redraw_overplot_on_image()
def xsize_textctrl_changed(self, evt):
validate_textctrl_str(self.xsize_textctrl, int, self.last_string_values['xsize'])
def xsize_textctrl_entered(self, evt):
if validate_textctrl_str(self.xsize_textctrl, int, self.last_string_values['xsize']):
self.last_string_values['xsize'] = self.xsize_textctrl.GetValue()
xsize = int(self.last_string_values['xsize'])
sys.stderr.write("\n\nxsize = {}\n\n".format(xsize))
x0,y0,x1,y1 = self.get_x0y0x1y1_from_stats_rect()
xc = (x0 + x1) / 2.
x0 = max(0, int(xc - xsize / 2.))
x1 = x0 + xsize - 1
x1 = min(x1, self.ztv_frame.display_image.shape[1] - 1)
x0 = x1 - xsize + 1
x0 = max(0, int(xc - xsize / 2.))
self.update_stats_box(x0, y0, x1, y1)
self.xsize_textctrl.SetSelection(-1, -1)
self.redraw_overplot_on_image()
def x1_textctrl_changed(self, evt):
validate_textctrl_str(self.x1_textctrl, int, self.last_string_values['x1'])
def x1_textctrl_entered(self, evt):
if validate_textctrl_str(self.x1_textctrl, int, self.last_string_values['x1']):
self.last_string_values['x1'] = self.x1_textctrl.GetValue()
self.update_stats_box(None, None, int(self.last_string_values['x1']), None)
self.x1_textctrl.SetSelection(-1, -1)
self.redraw_overplot_on_image()
def y0_textctrl_changed(self, evt):
validate_textctrl_str(self.y0_textctrl, int, self.last_string_values['y0'])
def y0_textctrl_entered(self, evt):
if validate_textctrl_str(self.y0_textctrl, int, self.last_string_values['y0']):
self.last_string_values['y0'] = self.y0_textctrl.GetValue()
self.update_stats_box(None, int(self.last_string_values['y0']), None, None)
self.y0_textctrl.SetSelection(-1, -1)
self.redraw_overplot_on_image()
def ysize_textctrl_changed(self, evt):
validate_textctrl_str(self.ysize_textctrl, int, self.last_string_values['ysize'])
def ysize_textctrl_entered(self, evt):
if validate_textctrl_str(self.ysize_textctrl, int, self.last_string_values['ysize']):
self.last_string_values['ysize'] = self.ysize_textctrl.GetValue()
ysize = int(self.last_string_values['ysize'])
x0,y0,x1,y1 = self.get_x0y0x1y1_from_stats_rect()
yc = (y0 + y1) / 2.
y0 = max(0, int(yc - ysize / 2.))
y1 = y0 + ysize - 1
y1 = min(y1, self.ztv_frame.display_image.shape[0] - 1)
y0 = y1 - ysize + 1
y0 = max(0, int(yc - ysize / 2.))
self.update_stats_box(x0, y0, x1, y1)
self.ysize_textctrl.SetSelection(-1, -1)
self.redraw_overplot_on_image()
def y1_textctrl_changed(self, evt):
validate_textctrl_str(self.y1_textctrl, int, self.last_string_values['y1'])
def y1_textctrl_entered(self, evt):
if validate_textctrl_str(self.y1_textctrl, int, self.last_string_values['y1']):
self.last_string_values['y1'] = self.y1_textctrl.GetValue()
self.update_stats_box(None, None, None, int(self.last_string_values['y1']))
self.y1_textctrl.SetSelection(-1, -1)
self.redraw_overplot_on_image()
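# Hedged sketch (an addition, not part of ztv): the coordinate handling in
# StatsPanel.update_stats_box boils down to this pure function -- order the
# two corners, then clamp both to the image bounds.
def _clamp_stats_box(x0, y0, x1, y1, image_shape):
    if x0 > x1:
        x0, x1 = x1, x0
    if y0 > y1:
        y0, y1 = y1, y0
    x0 = min(max(0, x0), image_shape[1] - 1)
    x1 = min(max(0, x1), image_shape[1] - 1)
    y0 = min(max(0, y0), image_shape[0] - 1)
    y1 = min(max(0, y1), image_shape[0] - 1)
    return x0, y0, x1, y1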
| mit |
Dekken/tick | tick/linear_model/tests/logistic_regression_test.py | 2 | 24388 | # License: BSD 3 clause
import itertools
import unittest
import numpy as np
from sklearn.metrics.ranking import roc_auc_score
from tick.base.inference import InferenceTest
from tick.linear_model import SimuLogReg, LogisticRegression
from tick.simulation import weights_sparse_gauss
from tick.preprocessing.features_binarizer import FeaturesBinarizer
from tick.prox import ProxZero, ProxL1, ProxL2Sq, ProxElasticNet, ProxTV, \
ProxBinarsity
solvers = ['gd', 'agd', 'sgd', 'sdca', 'bfgs', 'svrg']
penalties = ['none', 'l2', 'l1', 'tv', 'elasticnet', 'binarsity']
class Test(InferenceTest):
def setUp(self):
self.float_1 = 5.23e-4
self.float_2 = 3.86e-2
self.int_1 = 3198
self.int_2 = 230
self.X = np.zeros((5, 5))
self.y = np.zeros(5)
self.y[0] = 1
@staticmethod
def get_train_data(n_features=20, n_samples=3000, nnz=5):
np.random.seed(12)
weights0 = weights_sparse_gauss(n_features, nnz=nnz)
interc0 = 0.1
features, y = SimuLogReg(weights0, interc0, n_samples=n_samples,
verbose=False).simulate()
return features, y
def test_LogisticRegression_fit(self):
"""...Test LogisticRegression fit with different solvers and penalties
"""
sto_seed = 179312
raw_features, y = Test.get_train_data()
for fit_intercept in [True, False]:
for penalty in penalties:
if penalty == 'binarsity':
# binarize features
n_cuts = 3
binarizer = FeaturesBinarizer(n_cuts=n_cuts)
features = binarizer.fit_transform(raw_features)
else:
features = raw_features
for solver in solvers:
solver_kwargs = {
'penalty': penalty,
'tol': 1e-5,
'solver': solver,
'verbose': False,
'max_iter': 10,
'fit_intercept': fit_intercept
}
if penalty != 'none':
solver_kwargs['C'] = 100
if penalty == 'binarsity':
solver_kwargs['blocks_start'] = binarizer.blocks_start
solver_kwargs[
'blocks_length'] = binarizer.blocks_length
if solver == 'sdca':
solver_kwargs['sdca_ridge_strength'] = 2e-2
if solver in ['sgd', 'svrg', 'sdca']:
solver_kwargs['random_state'] = sto_seed
if solver == 'sgd':
solver_kwargs['step'] = 1.
if solver == 'bfgs':
# BFGS only accepts ProxZero and ProxL2sq for now
if penalty not in ['none', 'l2']:
continue
learner = LogisticRegression(**solver_kwargs)
learner.fit(features, y)
probas = learner.predict_proba(features)[:, 1]
auc = roc_auc_score(y, probas)
self.assertGreater(
auc, 0.7, "solver %s with penalty %s and "
"intercept %s reached too low AUC" % (solver, penalty,
fit_intercept))
def test_LogisticRegression_warm_start(self):
"""...Test LogisticRegression warm start
"""
sto_seed = 179312
X, y = Test.get_train_data()
fit_intercepts = [True, False]
cases = itertools.product(solvers, fit_intercepts)
for solver, fit_intercept in cases:
solver_kwargs = {
'solver': solver,
'max_iter': 2,
'fit_intercept': fit_intercept,
'warm_start': True,
'tol': 0
}
if solver == 'sdca':
msg = '^SDCA cannot be warm started$'
with self.assertRaisesRegex(ValueError, msg):
LogisticRegression(**solver_kwargs)
else:
if solver in ['sgd', 'svrg']:
solver_kwargs['random_state'] = sto_seed
if solver == 'sgd':
solver_kwargs['step'] = .3
learner = LogisticRegression(**solver_kwargs)
learner.fit(X, y)
if fit_intercept:
coeffs_1 = np.hstack((learner.weights, learner.intercept))
else:
coeffs_1 = learner.weights
learner.fit(X, y)
if fit_intercept:
coeffs_2 = np.hstack((learner.weights, learner.intercept))
else:
coeffs_2 = learner.weights
# Thanks to warm start objective should have decreased
self.assertLess(
learner._solver_obj.objective(coeffs_2),
learner._solver_obj.objective(coeffs_1))
@staticmethod
def specific_solver_kwargs(solver):
"""...A simple method to as systematically some kwargs to our tests
"""
return dict()
def test_LogisticRegression_settings(self):
"""...Test LogisticRegression basic settings
"""
# solver
from tick.solver import AGD, GD, BFGS, SGD, SVRG, SDCA
solver_class_map = {
'gd': GD,
'agd': AGD,
'sgd': SGD,
'svrg': SVRG,
'bfgs': BFGS,
'sdca': SDCA
}
for solver in solvers:
learner = LogisticRegression(solver=solver,
**Test.specific_solver_kwargs(solver))
solver_class = solver_class_map[solver]
self.assertTrue(isinstance(learner._solver_obj, solver_class))
msg = '^``solver`` must be one of agd, bfgs, gd, sdca, sgd, ' \
'svrg, got wrong_name$'
with self.assertRaisesRegex(ValueError, msg):
LogisticRegression(solver='wrong_name')
# prox
prox_class_map = {
'none': ProxZero,
'l1': ProxL1,
'l2': ProxL2Sq,
'elasticnet': ProxElasticNet,
'tv': ProxTV,
'binarsity': ProxBinarsity
}
for penalty in penalties:
if penalty == 'binarsity':
learner = LogisticRegression(penalty=penalty, blocks_start=[0],
blocks_length=[1])
else:
learner = LogisticRegression(penalty=penalty)
prox_class = prox_class_map[penalty]
self.assertTrue(isinstance(learner._prox_obj, prox_class))
msg = '^``penalty`` must be one of binarsity, elasticnet, l1, l2, none, ' \
'tv, got wrong_name$'
with self.assertRaisesRegex(ValueError, msg):
LogisticRegression(penalty='wrong_name')
def test_LogisticRegression_model_settings(self):
"""...Test LogisticRegression setting of parameters of model
"""
for solver in solvers:
learner = LogisticRegression(fit_intercept=True, solver=solver)
self.assertEqual(learner.fit_intercept, True)
self.assertEqual(learner._model_obj.fit_intercept, True)
learner.fit_intercept = False
self.assertEqual(learner.fit_intercept, False)
self.assertEqual(learner._model_obj.fit_intercept, False)
learner = LogisticRegression(fit_intercept=False, solver=solver)
self.assertEqual(learner.fit_intercept, False)
self.assertEqual(learner._model_obj.fit_intercept, False)
learner.fit_intercept = True
self.assertEqual(learner.fit_intercept, True)
self.assertEqual(learner._model_obj.fit_intercept, True)
def test_LogisticRegression_penalty_C(self):
"""...Test LogisticRegression setting of parameter of C
"""
for penalty in penalties:
if penalty != 'none':
if penalty == 'binarsity':
learner = LogisticRegression(
penalty=penalty, C=self.float_1, blocks_start=[0],
blocks_length=[1])
else:
learner = LogisticRegression(penalty=penalty,
C=self.float_1)
self.assertEqual(learner.C, self.float_1)
self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)
learner.C = self.float_2
self.assertEqual(learner.C, self.float_2)
self.assertEqual(learner._prox_obj.strength, 1. / self.float_2)
msg = '^``C`` must be positive, got -1$'
with self.assertRaisesRegex(ValueError, msg):
if penalty == 'binarsity':
LogisticRegression(penalty=penalty, C=-1,
blocks_start=[0], blocks_length=[1])
else:
LogisticRegression(penalty=penalty, C=-1)
else:
msg = '^You cannot set C for penalty "%s"$' % penalty
with self.assertWarnsRegex(RuntimeWarning, msg):
if penalty == 'binarsity':
LogisticRegression(penalty=penalty, C=self.float_1,
blocks_start=[0], blocks_length=[1])
else:
LogisticRegression(penalty=penalty, C=self.float_1)
if penalty == 'binarsity':
learner = LogisticRegression(
penalty=penalty, blocks_start=[0], blocks_length=[1])
else:
learner = LogisticRegression(penalty=penalty)
with self.assertWarnsRegex(RuntimeWarning, msg):
learner.C = self.float_1
msg = '^``C`` must be positive, got -2$'
with self.assertRaisesRegex(ValueError, msg):
learner.C = -2
def test_LogisticRegression_penalty_elastic_net_ratio(self):
"""...Test LogisticRegression setting of parameter of elastic_net_ratio
"""
ratio_1 = 0.6
ratio_2 = 0.3
for penalty in penalties:
if penalty == 'elasticnet':
learner = LogisticRegression(penalty=penalty, C=self.float_1,
elastic_net_ratio=ratio_1)
self.assertEqual(learner.C, self.float_1)
self.assertEqual(learner.elastic_net_ratio, ratio_1)
self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)
self.assertEqual(learner._prox_obj.ratio, ratio_1)
learner.elastic_net_ratio = ratio_2
self.assertEqual(learner.C, self.float_1)
self.assertEqual(learner.elastic_net_ratio, ratio_2)
self.assertEqual(learner._prox_obj.ratio, ratio_2)
else:
msg = '^Penalty "%s" has no elastic_net_ratio attribute$$' % \
penalty
with self.assertWarnsRegex(RuntimeWarning, msg):
if penalty == 'binarsity':
LogisticRegression(penalty=penalty,
elastic_net_ratio=0.8,
blocks_start=[0], blocks_length=[1])
else:
LogisticRegression(penalty=penalty,
elastic_net_ratio=0.8)
if penalty == 'binarsity':
learner = LogisticRegression(
penalty=penalty, blocks_start=[0], blocks_length=[1])
else:
learner = LogisticRegression(penalty=penalty)
with self.assertWarnsRegex(RuntimeWarning, msg):
learner.elastic_net_ratio = ratio_1
def test_LogisticRegression_solver_basic_settings(self):
"""...Test LogisticRegression setting of basic parameters of solver
"""
for solver in solvers:
# tol
learner = LogisticRegression(solver=solver, tol=self.float_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.tol, self.float_1)
self.assertEqual(learner._solver_obj.tol, self.float_1)
learner.tol = self.float_2
self.assertEqual(learner.tol, self.float_2)
self.assertEqual(learner._solver_obj.tol, self.float_2)
# max_iter
learner = LogisticRegression(solver=solver, max_iter=self.int_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.max_iter, self.int_1)
self.assertEqual(learner._solver_obj.max_iter, self.int_1)
learner.max_iter = self.int_2
self.assertEqual(learner.max_iter, self.int_2)
self.assertEqual(learner._solver_obj.max_iter, self.int_2)
# verbose
learner = LogisticRegression(solver=solver, verbose=True,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.verbose, True)
self.assertEqual(learner._solver_obj.verbose, True)
learner.verbose = False
self.assertEqual(learner.verbose, False)
self.assertEqual(learner._solver_obj.verbose, False)
learner = LogisticRegression(solver=solver, verbose=False,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.verbose, False)
self.assertEqual(learner._solver_obj.verbose, False)
learner.verbose = True
self.assertEqual(learner.verbose, True)
self.assertEqual(learner._solver_obj.verbose, True)
# print_every
learner = LogisticRegression(solver=solver, print_every=self.int_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.print_every, self.int_1)
self.assertEqual(learner._solver_obj.print_every, self.int_1)
learner.print_every = self.int_2
self.assertEqual(learner.print_every, self.int_2)
self.assertEqual(learner._solver_obj.print_every, self.int_2)
# record_every
learner = LogisticRegression(solver=solver,
record_every=self.int_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.record_every, self.int_1)
self.assertEqual(learner._solver_obj.record_every, self.int_1)
learner.record_every = self.int_2
self.assertEqual(learner.record_every, self.int_2)
self.assertEqual(learner._solver_obj.record_every, self.int_2)
def test_LogisticRegression_solver_step(self):
"""...Test LogisticRegression setting of step parameter of solver
"""
for solver in solvers:
if solver in ['sdca', 'bfgs']:
msg = '^Solver "%s" has no settable step$' % solver
with self.assertWarnsRegex(RuntimeWarning, msg):
learner = LogisticRegression(
solver=solver, step=1,
**Test.specific_solver_kwargs(solver))
self.assertIsNone(learner.step)
else:
learner = LogisticRegression(
solver=solver, step=self.float_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.step, self.float_1)
self.assertEqual(learner._solver_obj.step, self.float_1)
learner.step = self.float_2
self.assertEqual(learner.step, self.float_2)
self.assertEqual(learner._solver_obj.step, self.float_2)
if solver in ['sgd']:
msg = '^SGD step needs to be tuned manually$'
with self.assertWarnsRegex(RuntimeWarning, msg):
learner = LogisticRegression(solver='sgd')
learner.fit(self.X, self.y)
def test_LogisticRegression_solver_random_state(self):
"""...Test LogisticRegression setting of random_state parameter of solver
"""
for solver in solvers:
if solver in ['bfgs', 'agd', 'gd']:
msg = '^Solver "%s" has no settable random_state$' % solver
with self.assertWarnsRegex(RuntimeWarning, msg):
learner = LogisticRegression(
solver=solver, random_state=1,
**Test.specific_solver_kwargs(solver))
self.assertIsNone(learner.random_state)
else:
learner = LogisticRegression(
solver=solver, random_state=self.int_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.random_state, self.int_1)
self.assertEqual(learner._solver_obj.seed, self.int_1)
msg = '^random_state must be positive, got -1$'
with self.assertRaisesRegex(ValueError, msg):
LogisticRegression(solver=solver, random_state=-1,
**Test.specific_solver_kwargs(solver))
msg = '^random_state is readonly in LogisticRegression$'
with self.assertRaisesRegex(AttributeError, msg):
learner = LogisticRegression(
solver=solver, **Test.specific_solver_kwargs(solver))
learner.random_state = self.int_2
def test_LogisticRegression_solver_sdca_ridge_strength(self):
"""...Test LogisticRegression setting of sdca_ridge_strength parameter
of solver
"""
for solver in solvers:
if solver == 'sdca':
learner = LogisticRegression(
solver=solver, sdca_ridge_strength=self.float_1,
**Test.specific_solver_kwargs(solver))
self.assertEqual(learner.sdca_ridge_strength, self.float_1)
self.assertEqual(learner._solver_obj._solver.get_l_l2sq(),
self.float_1)
learner.sdca_ridge_strength = self.float_2
self.assertEqual(learner.sdca_ridge_strength, self.float_2)
self.assertEqual(learner._solver_obj._solver.get_l_l2sq(),
self.float_2)
else:
msg = '^Solver "%s" has no sdca_ridge_strength attribute$' % \
solver
with self.assertWarnsRegex(RuntimeWarning, msg):
LogisticRegression(solver=solver, sdca_ridge_strength=1e-2,
**Test.specific_solver_kwargs(solver))
learner = LogisticRegression(
solver=solver, **Test.specific_solver_kwargs(solver))
with self.assertWarnsRegex(RuntimeWarning, msg):
learner.sdca_ridge_strength = self.float_1
def test_safe_array_cast(self):
"""...Test error and warnings raised by LogLearner constructor
"""
msg = '^Copying array of size \(5, 5\) to convert it in the ' \
'right format$'
with self.assertWarnsRegex(RuntimeWarning, msg):
LogisticRegression._safe_array(self.X.astype(int))
msg = '^Copying array of size \(3, 5\) to create a ' \
'C-contiguous version of it$'
with self.assertWarnsRegex(RuntimeWarning, msg):
LogisticRegression._safe_array(self.X[::2])
np.testing.assert_array_equal(self.X,
LogisticRegression._safe_array(self.X))
def test_labels_encoding(self):
"""...Test that class encoding is well done for LogReg
"""
learner = LogisticRegression(max_iter=1)
np.random.seed(38027)
n_features = 3
n_samples = 5
X = np.random.rand(n_samples, n_features)
encoded_y = np.array([1., -1., 1., -1., -1.])
learner.fit(X, encoded_y)
np.testing.assert_array_equal(learner.classes, [-1., 1.])
np.testing.assert_array_equal(
learner._encode_labels_vector(encoded_y), encoded_y)
zero_one_y = np.array([1., 0., 1., 0., 0.])
learner.fit(X, zero_one_y)
np.testing.assert_array_equal(learner.classes, [0., 1.])
np.testing.assert_array_equal(
learner._encode_labels_vector(zero_one_y), encoded_y)
text_y = np.array(['cat', 'dog', 'cat', 'dog', 'dog'])
learner.fit(X, text_y)
np.testing.assert_array_equal(set(learner.classes), {'cat', 'dog'})
encoded_text_y = learner._encode_labels_vector(text_y)
np.testing.assert_array_equal(
encoded_text_y,
encoded_y * np.sign(encoded_text_y[0]) * np.sign(encoded_y[0]))
def test_predict(self):
"""...Test LogReg prediction
"""
labels_mappings = [{
-1: -1.,
1: 1.
}, {
-1: 1.,
1: -1.
}, {
-1: 1,
1: 0
}, {
-1: 0,
1: 1
}, {
-1: 'cat',
1: 'dog'
}]
for labels_mapping in labels_mappings:
X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
y = np.vectorize(labels_mapping.get)(y)
learner = LogisticRegression(random_state=32789, tol=1e-9)
learner.fit(X, y)
X_test, y_test = Test.get_train_data(n_features=12, n_samples=5,
nnz=0)
predicted_y = [1., 1., -1., 1., 1.]
predicted_y = np.vectorize(labels_mapping.get)(predicted_y)
np.testing.assert_array_equal(learner.predict(X_test), predicted_y)
def test_predict_proba(self):
"""...Test LogReg predict_proba
"""
X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
learner = LogisticRegression(random_state=32289, tol=1e-13)
learner.fit(X, y)
X_test, y_test = Test.get_train_data(n_features=12, n_samples=5, nnz=0)
predicted_probas = np.array(
[[0.35851418, 0.64148582], [0.42549328, 0.57450672],
[0.6749705, 0.3250295], [0.39684181,
0.60315819], [0.42732443, 0.57267557]])
np.testing.assert_array_almost_equal(
learner.predict_proba(X_test), predicted_probas, decimal=3)
def test_decision_function(self):
"""...Test LogReg predict_proba
"""
X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
learner = LogisticRegression(random_state=32789, tol=1e-13)
learner.fit(X, y)
X_test, y_test = Test.get_train_data(n_features=12, n_samples=5, nnz=0)
decision_function_values = np.array(
[0.58182, 0.30026, -0.73075, 0.41864, 0.29278])
np.testing.assert_array_almost_equal(
learner.decision_function(X_test), decision_function_values,
decimal=3)
def test_float_double_arrays_fitting(self):
X, y = Test.get_train_data(n_features=12, n_samples=300, nnz=0)
learner_64 = LogisticRegression(random_state=32789, tol=1e-13)
learner_64.fit(X, y)
weights_64 = learner_64.weights
self.assertEqual(weights_64.dtype, np.dtype('float64'))
learner_32 = LogisticRegression(random_state=32789, tol=1e-13)
X_32, y_32 = X.astype('float32'), y.astype('float32')
learner_32.fit(X_32, y_32)
weights_32 = learner_32.weights
self.assertEqual(weights_32.dtype, np.dtype('float32'))
np.testing.assert_array_almost_equal(weights_32, weights_64, decimal=5)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
fredhohman/a-viz-of-ice-and-fire | scripts/slice_clean_dialogue_general.py | 1 | 2079 | # author: istewart6 (some help from ssoni)
"""
Code to break subtitle dialogue
into 60 equal-length clean slices per episode.
"""
from __future__ import division
import pandas as pd
from datetime import datetime
import re, os
import argparse
from clean_extracted_text import clean_text
def convert_time(time_):
new_time = datetime.strptime(time_, '%H:%M:%S,%f')
new_time = new_time.hour * 60 + new_time.minute + new_time.second / 60
return new_time
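# e.g. convert_time('01:02:30,000') -> 1*60 + 2 + 30/60 = 62.5 (minutes,
# thanks to the true-division import above)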
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--sub_file', default='../data/subtitles/subtitlesInTSV/finding_nemo.tsv')
args = parser.parse_args()
sub_file = args.sub_file
sub_name = os.path.basename(sub_file).replace('.tsv', '_clean')
out_dir = os.path.dirname(sub_file)
# slice_length = 2
n_slices = 60
sub_data = pd.read_csv(sub_file, sep='\t', index_col=0)
# sub_data = pd.read_csv(sub_file, sep='\t')
end = sub_data['endTime'].max()
end = convert_time(end)
print('got end %s'%(end))
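    # assign each subtitle line to one of n_slices equal-duration slices,
    # based on its start time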
slice_length = end / n_slices
# print('about to convert end time data %s'%(e_data['endTime']))
slices = sub_data.apply(lambda r : int(convert_time(r['startTime']) / slice_length),
axis=1)
sub_data['slice'] = slices
# clean shit
sub_data['dialogue'] = sub_data['dialogue'].apply(clean_text)
# TODO: also get rid of duplicate lines
clean_rows = []
row_count = sub_data.shape[0]
    for i, r in sub_data.iterrows():
if(i > 0 and i < row_count-1):
current_dialogue = r['dialogue'].lower().strip()
last_dialogue = sub_data.ix[i-1, 'dialogue'].lower().strip()
if(current_dialogue != last_dialogue):
r = pd.DataFrame(r).transpose()
clean_rows.append(r)
print('got %d/%d clean rows'%(len(clean_rows), sub_data.shape[0]))
sub_data = pd.concat(clean_rows, axis=0)
out_name = os.path.join(out_dir, '%s.tsv'%(sub_name))
sub_data.to_csv(out_name, sep='\t', index=False)
if __name__ == '__main__':
main()
| mit |
mkness/TheCannon | code/makeplots_talks/makeplot_fits_self_cluster_ages.py | 1 | 6112 | #!/usr/bin/python
import scipy
import numpy
import pickle
from numpy import *
from scipy import ndimage
from scipy import interpolate
from numpy import loadtxt
import os
import numpy as np
from numpy import *
import matplotlib
from pylab import rcParams
from pylab import *
from matplotlib import pyplot
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.pyplot import axes
from matplotlib.pyplot import colorbar
#from matplotlib.ticker import NullFormatter
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
s = matplotlib.font_manager.FontProperties()
s.set_family('serif')
s.set_size(14)
from matplotlib import rc
rc('text', usetex=False)
rc('font', family='serif')
def plotfits():
# file_in = "self_tags.pickle"
file_in = "self_2nd_order_tags.pickle"
file_in2 = open(file_in, 'r')
params, icovs_params = pickle.load(file_in2)
params = array(params)
file_in2.close()
filein2 = 'starsin_test2.txt' # this is for self test this is dangerous - need to implement the same logg cut here, this is original data values or otherwise metadata
filein2 = 'starsin_new_all_ordered.txt' # this is for self test this is dangerous - need to implement the same logg cut here, this is original data values or otherwise metadata
filein2 = 'test4_selfg.txt' # this is for self test this is dangerous - need to implement the same logg cut here, this is original data values or otherwise metadata
filein3 = 'ages.txt'
a = open(filein2)
al = a.readlines()
names = []
for each in al:
names.append(each.split()[1])
unames = unique(names)
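    # build the row-index boundaries of each cluster's contiguous block of
    # stars in the input file (one boundary per unique cluster name)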
starind = arange(0,len(names), 1)
name_ind = []
names = array(names)
for each in unames:
takeit = each == names
name_ind.append(np.int(starind[takeit][-1]+1. ) )
cluster_ind = [0] + list(sort(name_ind))# + [len(al)]
plot_markers = ['ko', 'yo', 'ro', 'bo', 'co','k*', 'y*', 'r*', 'b*', 'c*', 'ks', 'rs', 'bs', 'cs', 'rd', 'kd', 'bd', 'cd', 'mo', 'ms' ]
t,g,feh,t_err,feh_err = loadtxt(filein2, usecols = (4,6,8,16,17), unpack =1)
tA,gA,fehA = loadtxt(filein2, usecols = (3,5,7), unpack =1)
age = loadtxt(filein3, usecols = (0,), unpack =1)
#
g_err = [0]*len(g)
g_err = array(g_err)
diffT = abs(array(t) - array(tA) )
pick = diffT < 4000.
t,g,feh,t_err,g_err,feh_err = t[pick], g[pick], feh[pick], t_err[pick], g_err[pick], feh_err[pick]
age = age[pick]
#
g_err = [0]*len(g)
age_err = [0]*len(g)
g_err = array(g_err)
age_err = array(age_err)
params = array(params)
covs_params = np.linalg.inv(icovs_params)
rcParams['figure.figsize'] = 12.0, 10.0
fig, temp = pyplot.subplots(4,1, sharex=False, sharey=False)
fig = plt.figure()
ax = fig.add_subplot(111, frameon = 0 )
ax.set_ylabel("The Cannon", labelpad = 40, fontsize = 20 )
ax.tick_params(labelcolor= 'w', top = 'off', bottom = 'off', left = 'off', right = 'off' )
ax1 = fig.add_subplot(411)
ax2 = fig.add_subplot(412)
ax3 = fig.add_subplot(413)
ax4 = fig.add_subplot(414)
#ax1 = temp[0]
#ax2 = temp[1]
#ax3 = temp[2]
#ax4 = temp[3]
params_labels = [params[:,0], params[:,1], params[:,2] , params[:,3], covs_params[:,0,0]**0.5, covs_params[:,1,1]**0.5, covs_params[:,2,2]**0.5 , covs_params[:,3,3]**0.5]
cval = ['k', 'b', 'r', 'c']
input_ASPCAP = [t, g, feh, age, t_err, g_err, feh_err, age_err]
listit_1 = [0,1,2,3]
listit_2 = [1,0,0,0]
axs = [ax1,ax2,ax3,ax4]
labels = ['teff', 'logg', 'Fe/H', 'age' ]
for i in range(0,len(cluster_ind)-1):
indc1 = cluster_ind[i]
indc2 = cluster_ind[i+1]
for ax, num,num2,label1,x1,y1 in zip(axs, listit_1,listit_2,labels, [4800,3.0,0.3,0.3], [3400,1,-1.5,5]):
pick = logical_and(g[indc1:indc2] > 0, logical_and(t_err[indc1:indc2] < 300, feh[indc1:indc2] > -4.0) )
cind = array(input_ASPCAP[1][indc1:indc2][pick])
cind = array(input_ASPCAP[num2][indc1:indc2][pick]).flatten()
ax.plot(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick], plot_markers[i])
#ax.errorbar(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick],yerr= params_labels[num+3][indc1:indc2][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
#ax.errorbar(input_ASPCAP[num][indc1:indc2][pick], params_labels[num][indc1:indc2][pick],xerr=input_ASPCAP[num+3][indc1:indc2][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
#ax.text(x1,y1,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[num+3][pick]),2)),fontsize = 14)
ax1.plot([0,6000], [0,6000], linewidth = 1.5, color = 'k' )
ax2.plot([0,5], [0,5], linewidth = 1.5, color = 'k' )
ax3.plot([-3,2], [-3,2], linewidth = 1.5, color = 'k' )
ax4.plot([-5,25], [-5,25], linewidth = 1.5, color = 'k' )
ax1.set_xlim(3500, 5500)
ax1.set_ylim(3500, 5500)
ax2.set_xlim(0, 5)
ax2.set_ylim(0, 5)
ax3.set_xlim(-3, 2)
ax4.set_xlim(-3, 20)
ax4.set_ylim(-3, 20)
ax1.set_xlabel("ASPCAP Teff (IR flux method) , [K]", fontsize = 14,labelpad = 5)
ax1.set_ylabel("Teff, [K]", fontsize = 14,labelpad = 10)
ax2.set_xlabel("ASPCAP logg (Kepler correction) , [dex]", fontsize = 14,labelpad = 5)
ax2.set_ylabel("logg, [dex]", fontsize = 14,labelpad = 10)
ax3.set_xlabel("ASPCAP [Fe/H], [dex]", fontsize = 14,labelpad = 5)
ax3.set_ylabel(" [Fe/H], [dex]", fontsize = 14,labelpad = 10)
ax4.set_ylabel("Age [Gyr]", fontsize = 14,labelpad = 5)
ax4.set_xlabel("Literature Ages [Gyr]", fontsize = 14,labelpad = 10)
# attach lines to plots
fig.subplots_adjust(hspace=0.44)
return
def savefig(fig, prefix, **kwargs):
for suffix in (".eps", ".png"):
print "writing %s" % (prefix + suffix)
fig.savefig(prefix + suffix, **kwargs)
if __name__ == "__main__": #args in command line
wl1,wl2,wl3,wl4,wl5,wl6 = 15392, 15697, 15958.8, 16208.6, 16120.4, 16169.5
plotfits()
| mit |
gmartinvela/OpenRocket | generate_statistics_from_SD.py | 1 | 7682 | import fileinput
import math
import collections
import time
import numpy as np
from pylab import *
from matplotlib import pyplot as plt
import matplotlib.mlab as mlab
#file_path = '/media/ABB4-4F3A/DATALOG.TXT'
file_path = 'DATALOG.TXT'
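# Assumed DATALOG.TXT layout (inferred from the parsing below): a header line
# naming 13 comma-separated columns (fingerprint f, accelerometer ax/ay/az,
# gyro gx/gy/gz, magnetometer mx/my/mz, temperature t, pressure p, humidity h),
# repeated at the start of every flight, followed by one CSV row per sample.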
def split_in_blocks(txt_file, pattern):
'''
    Find the header lines that mark the start of a new flight and split the file into one block of data per flight.
    Return: a list containing the blocks of data and a list containing the header columns.
'''
num_times_find_pattern = []
for num_line, line in enumerate(fileinput.input(txt_file)):
if pattern in line:
num_times_find_pattern.append(num_line)
if num_line == 0:
header = list(line.strip().split(","))
#print header
blocks_of_data = []
with open(txt_file) as f:
lines = f.readlines()
for num_header_line in num_times_find_pattern:
if num_header_line == 0:
num_header_line_prev = num_header_line
else:
block_lines = lines[num_header_line_prev + 1 : num_header_line - 1]
blocks_of_data.append(block_lines)
num_header_line_prev = num_header_line
block_lines = lines[num_header_line_prev + 1 : num_line + 1]
blocks_of_data.append(block_lines)
return blocks_of_data, header
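# split_in_blocks returns: blocks_of_data[i] = the raw CSV lines of flight i;
# header = the list of column names taken from the first header line.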
def manage_data_from_blocks(blocks, header):
'''
    Split every block into lists keyed by the type of data (acceleration, temperature, ...) followed by the block number.
    Return: a dict that contains all the different types of data, separated and numbered per flight.
'''
# TODO: Automatize this function to accept more headers!!
blocks_dict = collections.OrderedDict()
for block_number, block in enumerate(blocks):
for item in header:
blocks_dict['%s%s' % (item,block_number)] = []
for line in block:
line_list = line.strip().split(",")
blocks_dict['f%s' % block_number].append(int(line_list[0]))
blocks_dict['ax%s' % block_number].append(float(line_list[1]))
blocks_dict['ay%s' % block_number].append(float(line_list[2]))
blocks_dict['az%s' % block_number].append(float(line_list[3]))
blocks_dict['gx%s' % block_number].append(float(line_list[4]))
blocks_dict['gy%s' % block_number].append(float(line_list[5]))
blocks_dict['gz%s' % block_number].append(float(line_list[6]))
blocks_dict['mx%s' % block_number].append(float(line_list[7]))
blocks_dict['my%s' % block_number].append(float(line_list[8]))
blocks_dict['mz%s' % block_number].append(float(line_list[9]))
blocks_dict['t%s' % block_number].append(float(line_list[10]))
blocks_dict['p%s' % block_number].append(int(line_list[11]))
blocks_dict['h%s' % block_number].append(float(line_list[12]))
return blocks_dict
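# Example keys in the dict returned by manage_data_from_blocks for two flights:
# 'f0', 'ax0', ..., 'h0', 'f1', 'ax1', ..., 'h1', each mapping to the list of
# parsed values of that column for that flight.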
def process_data(blocks_dict, header):
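    # Group the block keys by header prefix: block_list_header_based[i] gathers
    # every per-flight key whose name starts with header column i (e.g. all
    # 'ax*' keys for i == 1). Index 12 is the last column ('h*' in the
    # hard-coded mapping above); index 10 would hold the temperature keys ('t*').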
block_list_header_based = []
for num, item in enumerate(header):
block_list_header_based.append([])
for block in blocks_dict:
if block.startswith(header[num]):
block_list_header_based[num].append(block)
# DEBUG! print "%s: %s" % (block, blocks_dict[block])
print block_list_header_based
#fingerprint_basic_info = basic_process_only_for_fingerprints(block_list_header_based[0])
temp_basic_info = basic_process_data(block_list_header_based[12])
#height_basic_info = basic_process_data(block_list_header_based[12])
print_basic_histograms(block_list_header_based[12])
print_basic_scatters(block_list_header_based[12])
print_basic_evolution_2_axis(block_list_header_based[0], block_list_header_based[12])
def basic_process_only_for_fingerprints(fingerprints):
fingerprint_basic_info = collections.OrderedDict()
fingerprint_list = []
for num, fingerprint_block in enumerate(fingerprints):
millis_interval_list = []
for position, millis in enumerate(blocks_dict[fingerprint_block]):
if position != 0:
millis_interval = millis - millis_prev
millis_interval_list.append(millis_interval)
millis_prev = millis
blocks_dict["fp%s" % (num)] = millis_interval_list
fingerprint_list.append("fp%s" % (num))
fingerprint_basic_info = basic_process_data(fingerprint_list)
return fingerprint_basic_info
def basic_process_data(data_list):
data_basic_info = collections.OrderedDict()
for data_block in data_list:
data_basic_info[data_block] = {}
data_avg_mean = np.mean(blocks_dict[data_block]) # Average
data_avg_weighted = np.average(blocks_dict[data_block]) # Average weighted
data_amax = np.amax(blocks_dict[data_block]) # MAX
data_amin = np.amin(blocks_dict[data_block]) # MIN
data_med = np.median(blocks_dict[data_block]) # Median
data_std = np.std(blocks_dict[data_block]) # Standard deviation
data_ptp = np.ptp(blocks_dict[data_block]) # Distance MAX to MIN
data_var = np.var(blocks_dict[data_block]) # Variance
data_basic_info[data_block] = {"AVM" : "%.3f" % data_avg_mean, "AVW" : "%.3f" % data_avg_weighted, "MAX" : "%.3f" % data_amax,
"MIN" : "%.3f" % data_amin, "MED" : "%.3f" % data_med, "STD" : "%.3f" % data_std,
"PTP" : "%.3f" % data_ptp, "VAR" : "%.3f" % data_var}
        # PLOT NORMAL PDF FROM THE DATA
#sigma = sqrt(data_var)
#x = np.linspace(data_amin,data_amax)
#plt.plot(x,mlab.normpdf(x,data_avg_mean,sigma))
plt.show()
for key in data_basic_info:
print data_basic_info[key]
return data_basic_info
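# Example of one returned entry (all values are strings formatted to 3 decimals):
# {'h0': {'AVM': '...', 'AVW': '...', 'MAX': '...', 'MIN': '...',
#         'MED': '...', 'STD': '...', 'PTP': '...', 'VAR': '...'}}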
def print_basic_histograms(data_list):
#plt.ion()
plt.figure(1)
for num, data in enumerate(data_list):
nrows = int(math.ceil(float(len(data_list) / 3.0)))
ncols = 3
subplot_index = "%s%s%s" % (nrows, ncols, num + 1)
plt.subplot(subplot_index)
plt.hist(blocks_dict[data], bins=20)
#data_new = np.histogramdd(blocks_dict[data])
#plt.hist(data_new, bins=20)
plt.xlabel("Value", fontsize=8)
plt.ylabel("Frequency", fontsize=8)
plt.suptitle("Gaussian Histogram", fontsize=12)
plt.show()
#plt.show(block=True)
def print_basic_scatters(data_list):
#plt.ion()
plt.figure(1)
for num, data in enumerate(data_list):
nrows = int(math.ceil(float(len(data_list) / 3.0)))
ncols = 3
subplot_index = "%s%s%s" % (nrows, ncols, num + 1)
plt.subplot(subplot_index)
plt.scatter(np.random.randn(1000), np.random.randn(1000))
plt.suptitle("Gaussian Histogram", fontsize=12)
plt.show()
#plt.show(block=True)
def print_basic_evolution_2_axis(x_axis_data_list, y_axis_data_list):
plt.figure(1)
for num in range(len(x_axis_data_list)):
x = blocks_dict[x_axis_data_list[num]]
y = blocks_dict[y_axis_data_list[num]]
#subplot(nrows, ncols, plot_number)
nrows = int(math.ceil(float(len(x_axis_data_list) / 3.0)))
ncols = 3
subplot_index = "%s%s%s" % (nrows, ncols, num + 1)
plt.subplot(subplot_index)
plt.plot(x, y, linewidth=1.0, color="green")
xlabel('time (milliseconds)', fontsize = 8)
#ylabel('temperature (C)', fontsize = 8)
#title('', fontsize=10)
grid(True)
plt.xticks(blocks_dict[x_axis_data_list[num]][::len(blocks_dict[x_axis_data_list[num]])/10], rotation=30, fontsize=8)
#plt.annotate('Despegue', xy=(2200, 34.82), xytext=(2300, 34.88),
# bbox=dict(boxstyle="round", fc="0.8"),
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#plt.annotate('Paracaidas', xy=(7200, 34.82), xytext=(6300, 34.88),
# arrowprops=dict(facecolor='black', shrink=0.05),
# )
#axvline(x=2200)
#axhspan(34.80, 34.82, facecolor='0.5', alpha=0.5, color="red")
plt.ylim(min(blocks_dict[y_axis_data_list[num]]) - 0.02, max(blocks_dict[y_axis_data_list[num]]) + 0.02)
plt.yticks(fontsize=8)
#plt.suptitle('temperatures in data', fontsize=12)
plt.show()
#start = time.time()
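# The pattern "m" is assumed to occur only in header lines (via the
# magnetometer columns mx/my/mz); data rows are purely numeric, so every match
# marks the start of a new flight block.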
blocks, header = split_in_blocks(file_path, "m")
blocks_dict = manage_data_from_blocks(blocks, header)
process_data(blocks_dict, header)
#stop = time.time()
#total_time = stop -start
#print total_time | mit |
Eric89GXL/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 6 | 9808 | import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
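# Smoke tests for the dataset generators: each test checks output shapes and a
# few simple statistical properties of the generated data.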
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_multilabel_classification():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator=True,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_equal(X[:, 0], t * np.cos(t))
assert_array_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_equal(X[:, 0], np.sin(t))
assert_array_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
dsm054/pandas | pandas/tests/indexes/datetimes/test_partial_slicing.py | 1 | 15592 | """ test partial slicing on Series/Frame """
from datetime import datetime
import operator as op
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, Series, Timedelta, Timestamp, date_range)
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
class TestSlicing(object):
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
assert v1 == Timestamp('2/28/2005')
assert v2 == Timestamp('4/30/2005')
assert v3 == Timestamp('6/30/2005')
# don't carry freq through irregular slicing
assert dti2.freq is None
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
assert dr[1:].name == dr.name
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
date_range('2014-01-01', periods=20, freq='MS'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp(
'2014-10-01'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
date_range('2014-01-01', periods=20, freq='MS'))
with pytest.raises(ValueError, match='slice step cannot be zero'):
ts[::0]
with pytest.raises(ValueError, match='slice step cannot be zero'):
ts.loc[::0]
with pytest.raises(ValueError, match='slice step cannot be zero'):
ts.loc[::0]
def test_slice_bounds_empty(self):
# GH 14354
empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015')
right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')
exp = Timestamp('2015-01-02 23:59:59.999999999')
assert right == exp
left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')
exp = Timestamp('2015-01-02 00:00:00')
assert left == exp
def test_slice_duplicate_monotonic(self):
# https://github.com/pandas-dev/pandas/issues/16515
idx = pd.DatetimeIndex(['2017', '2017'])
result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc')
expected = Timestamp('2017-01-01')
assert result == expected
def test_monotone_DTI_indexing_bug(self):
# GH 19362
        # Test accessing the first element of a monotonically decreasing
        # DatetimeIndex via partial string indexing.
df = pd.DataFrame(list(range(5)))
date_list = ['2018-01-02', '2017-02-10', '2016-03-10',
'2015-03-15', '2014-03-16']
date_index = pd.to_datetime(date_list)
df['date'] = date_index
expected = pd.DataFrame({0: list(range(5)), 'date': date_index})
tm.assert_frame_equal(df, expected)
df = pd.DataFrame({'A': [1, 2, 3]},
index=pd.date_range('20170101',
periods=3)[::-1])
expected = pd.DataFrame({'A': 1},
index=pd.date_range('20170103',
periods=1))
tm.assert_frame_equal(df.loc['2017-01-03'], expected)
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.loc['2005']
expected = df[df.index.year == 2005]
tm.assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
assert result == expected
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2001Q1']) == 90
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['1Q01']) == 90
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2005-11']) == 30
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['2005-11']) == 30
tm.assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
tm.assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-1-1']
assert result == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
tm.assert_series_equal(result, s.iloc[:24])
pytest.raises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60 * 4])
result = s['2005-1-1 20']
tm.assert_series_equal(result, s.iloc[:60])
assert s['2005-1-1 20:00'] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
tm.assert_series_equal(result, s.iloc[:60])
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60])
assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slice_second_precision(self):
rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59,
microsecond=999990),
periods=20, freq='US')
s = Series(np.arange(20), rng)
tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:])
tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:])
assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0]
with pytest.raises(KeyError, match='2005-1-1 00:00:00'):
s['2005-1-1 00:00:00']
def test_partial_slicing_dataframe(self):
# GH14856
# Test various combinations of string slicing resolution vs.
# index resolution
# - If string resolution is less precise than index resolution,
# string is considered a slice
# - If string resolution is equal to or more precise than index
# resolution, string is considered an exact match
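        # e.g. with a daily DatetimeIndex, ser['2012-01'] slices the whole
        # month, while ser['2012-01-01'] is an exact match (a scalar for a
        # Series, KeyError for a DataFrame).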
formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H',
'%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S']
resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second']
for rnum, resolution in enumerate(resolutions[2:], 2):
# we check only 'day', 'hour', 'minute' and 'second'
unit = Timedelta("1 " + resolution)
middate = datetime(2012, 1, 1, 0, 0, 0)
index = DatetimeIndex([middate - unit,
middate, middate + unit])
values = [1, 2, 3]
df = DataFrame({'a': values}, index, dtype=np.int64)
assert df.index.resolution == resolution
# Timestamp with the same resolution as index
# Should be exact match for Series (return scalar)
# and raise KeyError for Frame
for timestamp, expected in zip(index, values):
ts_string = timestamp.strftime(formats[rnum])
# make ts_string as precise as index
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == expected
pytest.raises(KeyError, df.__getitem__, ts_string)
# Timestamp with resolution less precise than index
for fmt in formats[:rnum]:
for element, theslice in [[0, slice(None, 1)],
[1, slice(1, None)]]:
ts_string = index[element].strftime(fmt)
# Series should return slice
result = df['a'][ts_string]
expected = df['a'][theslice]
tm.assert_series_equal(result, expected)
# Frame should return slice as well
result = df[ts_string]
expected = df[theslice]
tm.assert_frame_equal(result, expected)
# Timestamp with resolution more precise than index
# Compatible with existing key
# Should return scalar for Series
# and raise KeyError for Frame
for fmt in formats[rnum + 1:]:
ts_string = index[1].strftime(fmt)
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == 2
pytest.raises(KeyError, df.__getitem__, ts_string)
# Not compatible with existing key
# Should raise KeyError
for fmt, res in list(zip(formats, resolutions))[rnum + 1:]:
ts = index[1] + Timedelta("1 " + res)
ts_string = ts.strftime(fmt)
pytest.raises(KeyError, df['a'].__getitem__, ts_string)
pytest.raises(KeyError, df.__getitem__, ts_string)
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT': ["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER': ["ABC", "MNP", "XYZ", "XYZ"],
'val': [1, 2, 3, 4]},
index=date_range("2013-06-19 09:30:00",
periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([
[1]
], index=Index(['ABC'], name='TICKER'), columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
tm.assert_frame_equal(result, expected)
expected = df_multi.loc[
(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
tm.assert_series_equal(result, expected)
# this is an IndexingError as we don't do partial string selection on
# multi-levels.
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
pytest.raises(IndexingError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range(
'2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
tm.assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
tm.assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.xs('2000-1-4')
result = df2.loc[pd.Timestamp('2000-1-4')]
tm.assert_frame_equal(result, expected)
def test_partial_slice_doesnt_require_monotonicity(self):
# For historical reasons.
s = pd.Series(np.arange(10), pd.date_range('2014-01-01', periods=10))
nonmonotonic = s[[3, 5, 4]]
expected = nonmonotonic.iloc[:0]
timestamp = pd.Timestamp('2014-01-10')
tm.assert_series_equal(nonmonotonic['2014-01-10':], expected)
with pytest.raises(KeyError,
match=r"Timestamp\('2014-01-10 00:00:00'\)"):
nonmonotonic[timestamp:]
tm.assert_series_equal(nonmonotonic.loc['2014-01-10':], expected)
with pytest.raises(KeyError,
match=r"Timestamp\('2014-01-10 00:00:00'\)"):
nonmonotonic.loc[timestamp:]
def test_loc_datetime_length_one(self):
# GH16071
df = pd.DataFrame(columns=['1'],
index=pd.date_range('2016-10-01T00:00:00',
'2016-10-01T23:59:59'))
result = df.loc[datetime(2016, 10, 1):]
tm.assert_frame_equal(result, df)
result = df.loc['2016-10-01T00:00:00':]
tm.assert_frame_equal(result, df)
@pytest.mark.parametrize('datetimelike', [
Timestamp('20130101'), datetime(2013, 1, 1),
np.datetime64('2013-01-01T00:00', 'ns')])
@pytest.mark.parametrize('op,expected', [
(op.lt, [True, False, False, False]),
(op.le, [True, True, False, False]),
(op.eq, [False, True, False, False]),
(op.gt, [False, False, False, True])])
def test_selection_by_datetimelike(self, datetimelike, op, expected):
# GH issue #17965, test for ability to compare datetime64[ns] columns
# to datetimelike
df = DataFrame({'A': [pd.Timestamp('20120101'),
pd.Timestamp('20130101'),
np.nan, pd.Timestamp('20130103')]})
result = op(df.A, datetimelike)
expected = Series(expected, name='A')
tm.assert_series_equal(result, expected)
| bsd-3-clause |
oew1v07/scikit-image | doc/examples/plot_convex_hull.py | 4 | 1481 | """
===========
Convex Hull
===========
The convex hull of a binary image is the set of pixels included in the
smallest convex polygon that surrounds all white pixels in the input.
In this example, we show how the input pixels (white) get filled in by the
convex hull (white and grey).
A good overview of the algorithm is given on `Steve Eddins' blog
<http://blogs.mathworks.com/steve/2011/10/04/binary-image-convex-hull-algorithm-notes/>`__.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import convex_hull_image
image = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=float)
original_image = np.copy(image)
chull = convex_hull_image(image)
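# chull is a boolean mask of the same shape as `image`: True for every pixel
# inside (or on) the convex hull of the white input pixels.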
image[chull] += 1
# image is now:
#[[ 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 0. 2. 0. 0. 0. 0.]
# [ 0. 0. 0. 2. 1. 2. 0. 0. 0.]
# [ 0. 0. 2. 1. 1. 1. 2. 0. 0.]
# [ 0. 2. 1. 1. 1. 1. 1. 2. 0.]
# [ 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6))
ax1.set_title('Original picture')
ax1.imshow(original_image, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_xticks([]), ax1.set_yticks([])
ax2.set_title('Transformed picture')
ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_xticks([]), ax2.set_yticks([])
plt.show()
| bsd-3-clause |
xray/xray | xarray/tests/test_indexing.py | 3 | 26721 | import itertools
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable
from xarray.core import indexing, nputils
from . import IndexerMaker, ReturnItem, assert_array_equal, raises_regex
B = IndexerMaker(indexing.BasicIndexer)
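# B[key] wraps plain __getitem__ syntax into a BasicIndexer, e.g. B[:5] builds
# BasicIndexer((slice(None, 5),)); it is used as shorthand in the tests below
# (behaviour assumed from the IndexerMaker test helper).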
class TestIndexers:
def set_to_zero(self, x, i):
x = x.copy()
x[i] = 0
return x
def test_expanded_indexer(self):
x = np.random.randn(10, 11, 12, 13, 14)
y = np.arange(5)
arr = ReturnItem()
for i in [
arr[:],
arr[...],
arr[0, :, 10],
arr[..., 10],
arr[:5, ..., 0],
arr[..., 0, :],
arr[y],
arr[y, y],
arr[..., y, y],
arr[..., 0, 1, 2, 3, 4],
]:
j = indexing.expanded_indexer(i, x.ndim)
assert_array_equal(x[i], x[j])
assert_array_equal(self.set_to_zero(x, i), self.set_to_zero(x, j))
with raises_regex(IndexError, "too many indices"):
indexing.expanded_indexer(arr[1, 2, 3], 2)
def test_asarray_tuplesafe(self):
res = indexing._asarray_tuplesafe(("a", 1))
assert isinstance(res, np.ndarray)
assert res.ndim == 0
assert res.item() == ("a", 1)
res = indexing._asarray_tuplesafe([(0,), (1,)])
assert res.shape == (2,)
assert res[0] == (0,)
assert res[1] == (1,)
def test_stacked_multiindex_min_max(self):
data = np.random.randn(3, 23, 4)
da = DataArray(
data,
name="value",
dims=["replicate", "rsample", "exp"],
coords=dict(
replicate=[0, 1, 2], exp=["a", "b", "c", "d"], rsample=list(range(23))
),
)
da2 = da.stack(sample=("replicate", "rsample"))
s = da2.sample
assert_array_equal(da2.loc["a", s.max()], data[2, 22, 0])
assert_array_equal(da2.loc["b", s.min()], data[0, 0, 1])
def test_convert_label_indexer(self):
# TODO: add tests that aren't just for edge cases
index = pd.Index([1, 2, 3])
with raises_regex(KeyError, "not all values found"):
indexing.convert_label_indexer(index, [0])
with pytest.raises(KeyError):
indexing.convert_label_indexer(index, 0)
with raises_regex(ValueError, "does not have a MultiIndex"):
indexing.convert_label_indexer(index, {"one": 0})
mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
with raises_regex(KeyError, "not all values found"):
indexing.convert_label_indexer(mindex, [0])
with pytest.raises(KeyError):
indexing.convert_label_indexer(mindex, 0)
with pytest.raises(ValueError):
indexing.convert_label_indexer(index, {"three": 0})
with pytest.raises(IndexError):
indexing.convert_label_indexer(mindex, (slice(None), 1, "no_level"))
def test_convert_unsorted_datetime_index_raises(self):
index = pd.to_datetime(["2001", "2000", "2002"])
with pytest.raises(KeyError):
# pandas will try to convert this into an array indexer. We should
# raise instead, so we can be sure the result of indexing with a
# slice is always a view.
indexing.convert_label_indexer(index, slice("2001", "2002"))
def test_get_dim_indexers(self):
mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
mdata = DataArray(range(4), [("x", mindex)])
dim_indexers = indexing.get_dim_indexers(mdata, {"one": "a", "two": 1})
assert dim_indexers == {"x": {"one": "a", "two": 1}}
with raises_regex(ValueError, "cannot combine"):
indexing.get_dim_indexers(mdata, {"x": "a", "two": 1})
with raises_regex(ValueError, "do not exist"):
indexing.get_dim_indexers(mdata, {"y": "a"})
with raises_regex(ValueError, "do not exist"):
indexing.get_dim_indexers(mdata, {"four": 1})
def test_remap_label_indexers(self):
def test_indexer(data, x, expected_pos, expected_idx=None):
pos, idx = indexing.remap_label_indexers(data, {"x": x})
assert_array_equal(pos.get("x"), expected_pos)
assert_array_equal(idx.get("x"), expected_idx)
data = Dataset({"x": ("x", [1, 2, 3])})
mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
mdata = DataArray(range(8), [("x", mindex)])
test_indexer(data, 1, 0)
test_indexer(data, np.int32(1), 0)
test_indexer(data, Variable([], 1), 0)
test_indexer(mdata, ("a", 1, -1), 0)
test_indexer(
mdata,
("a", 1),
[True, True, False, False, False, False, False, False],
[-1, -2],
)
test_indexer(
mdata,
"a",
slice(0, 4, None),
pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
)
test_indexer(
mdata,
("a",),
[True, True, True, True, False, False, False, False],
pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
)
test_indexer(mdata, [("a", 1, -1), ("b", 2, -2)], [0, 7])
test_indexer(mdata, slice("a", "b"), slice(0, 8, None))
test_indexer(mdata, slice(("a", 1), ("b", 1)), slice(0, 6, None))
test_indexer(mdata, {"one": "a", "two": 1, "three": -1}, 0)
test_indexer(
mdata,
{"one": "a", "two": 1},
[True, True, False, False, False, False, False, False],
[-1, -2],
)
test_indexer(
mdata,
{"one": "a", "three": -1},
[True, False, True, False, False, False, False, False],
[1, 2],
)
test_indexer(
mdata,
{"one": "a"},
[True, True, True, True, False, False, False, False],
pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
)
def test_read_only_view(self):
arr = DataArray(
np.random.rand(3, 3),
coords={"x": np.arange(3), "y": np.arange(3)},
dims=("x", "y"),
) # Create a 2D DataArray
arr = arr.expand_dims({"z": 3}, -1) # New dimension 'z'
arr["z"] = np.arange(3) # New coords to dimension 'z'
with pytest.raises(ValueError, match="Do you want to .copy()"):
arr.loc[0, 0, 0] = 999
class TestLazyArray:
def test_slice_slice(self):
arr = ReturnItem()
for size in [100, 99]:
# We test even/odd size cases
x = np.arange(size)
slices = [
arr[:3],
arr[:4],
arr[2:4],
arr[:1],
arr[:-1],
arr[5:-1],
arr[-5:-1],
arr[::-1],
arr[5::-1],
arr[:3:-1],
arr[:30:-1],
arr[10:4:],
arr[::4],
arr[4:4:4],
arr[:4:-4],
arr[::-2],
]
for i in slices:
for j in slices:
expected = x[i][j]
new_slice = indexing.slice_slice(i, j, size=size)
actual = x[new_slice]
assert_array_equal(expected, actual)
def test_lazily_indexed_array(self):
original = np.random.rand(10, 20, 30)
x = indexing.NumpyIndexingAdapter(original)
v = Variable(["i", "j", "k"], original)
lazy = indexing.LazilyOuterIndexedArray(x)
v_lazy = Variable(["i", "j", "k"], lazy)
arr = ReturnItem()
# test orthogonally applied indexers
indexers = [arr[:], 0, -2, arr[:3], [0, 1, 2, 3], [0], np.arange(10) < 5]
for i in indexers:
for j in indexers:
for k in indexers:
if isinstance(j, np.ndarray) and j.dtype.kind == "b":
j = np.arange(20) < 5
if isinstance(k, np.ndarray) and k.dtype.kind == "b":
k = np.arange(30) < 5
expected = np.asarray(v[i, j, k])
for actual in [
v_lazy[i, j, k],
v_lazy[:, j, k][i],
v_lazy[:, :, k][:, j][i],
]:
assert expected.shape == actual.shape
assert_array_equal(expected, actual)
assert isinstance(
actual._data, indexing.LazilyOuterIndexedArray
)
# make sure actual.key is appropriate type
if all(
isinstance(k, (int, slice)) for k in v_lazy._data.key.tuple
):
assert isinstance(v_lazy._data.key, indexing.BasicIndexer)
else:
assert isinstance(v_lazy._data.key, indexing.OuterIndexer)
# test sequentially applied indexers
indexers = [
(3, 2),
(arr[:], 0),
(arr[:2], -1),
(arr[:4], [0]),
([4, 5], 0),
([0, 1, 2], [0, 1]),
([0, 3, 5], arr[:2]),
]
for i, j in indexers:
expected = v[i][j]
actual = v_lazy[i][j]
assert expected.shape == actual.shape
assert_array_equal(expected, actual)
# test transpose
if actual.ndim > 1:
order = np.random.choice(actual.ndim, actual.ndim)
order = np.array(actual.dims)
transposed = actual.transpose(*order)
assert_array_equal(expected.transpose(*order), transposed)
assert isinstance(
actual._data,
(
indexing.LazilyVectorizedIndexedArray,
indexing.LazilyOuterIndexedArray,
),
)
assert isinstance(actual._data, indexing.LazilyOuterIndexedArray)
assert isinstance(actual._data.array, indexing.NumpyIndexingAdapter)
def test_vectorized_lazily_indexed_array(self):
original = np.random.rand(10, 20, 30)
x = indexing.NumpyIndexingAdapter(original)
v_eager = Variable(["i", "j", "k"], x)
lazy = indexing.LazilyOuterIndexedArray(x)
v_lazy = Variable(["i", "j", "k"], lazy)
arr = ReturnItem()
def check_indexing(v_eager, v_lazy, indexers):
for indexer in indexers:
actual = v_lazy[indexer]
expected = v_eager[indexer]
assert expected.shape == actual.shape
assert isinstance(
actual._data,
(
indexing.LazilyVectorizedIndexedArray,
indexing.LazilyOuterIndexedArray,
),
)
assert_array_equal(expected, actual)
v_eager = expected
v_lazy = actual
# test orthogonal indexing
indexers = [(arr[:], 0, 1), (Variable("i", [0, 1]),)]
check_indexing(v_eager, v_lazy, indexers)
# vectorized indexing
indexers = [
(Variable("i", [0, 1]), Variable("i", [0, 1]), slice(None)),
(slice(1, 3, 2), 0),
]
check_indexing(v_eager, v_lazy, indexers)
indexers = [
(slice(None, None, 2), 0, slice(None, 10)),
(Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
(Variable(["i", "j"], [[0, 1], [1, 2]]),),
]
check_indexing(v_eager, v_lazy, indexers)
indexers = [
(Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
(Variable(["i", "j"], [[0, 1], [1, 2]]),),
]
check_indexing(v_eager, v_lazy, indexers)
class TestCopyOnWriteArray:
def test_setitem(self):
original = np.arange(10)
wrapped = indexing.CopyOnWriteArray(original)
wrapped[B[:]] = 0
assert_array_equal(original, np.arange(10))
assert_array_equal(wrapped, np.zeros(10))
def test_sub_array(self):
original = np.arange(10)
wrapped = indexing.CopyOnWriteArray(original)
child = wrapped[B[:5]]
assert isinstance(child, indexing.CopyOnWriteArray)
child[B[:]] = 0
assert_array_equal(original, np.arange(10))
assert_array_equal(wrapped, np.arange(10))
assert_array_equal(child, np.zeros(5))
def test_index_scalar(self):
# regression test for GH1374
x = indexing.CopyOnWriteArray(np.array(["foo", "bar"]))
assert np.array(x[B[0]][B[()]]) == "foo"
class TestMemoryCachedArray:
def test_wrapper(self):
original = indexing.LazilyOuterIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
assert_array_equal(wrapped, np.arange(10))
assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter)
def test_sub_array(self):
original = indexing.LazilyOuterIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
child = wrapped[B[:5]]
assert isinstance(child, indexing.MemoryCachedArray)
assert_array_equal(child, np.arange(5))
assert isinstance(child.array, indexing.NumpyIndexingAdapter)
assert isinstance(wrapped.array, indexing.LazilyOuterIndexedArray)
def test_setitem(self):
original = np.arange(10)
wrapped = indexing.MemoryCachedArray(original)
wrapped[B[:]] = 0
assert_array_equal(original, np.zeros(10))
def test_index_scalar(self):
# regression test for GH1374
x = indexing.MemoryCachedArray(np.array(["foo", "bar"]))
assert np.array(x[B[0]][B[()]]) == "foo"
def test_base_explicit_indexer():
with pytest.raises(TypeError):
indexing.ExplicitIndexer(())
class Subclass(indexing.ExplicitIndexer):
pass
value = Subclass((1, 2, 3))
assert value.tuple == (1, 2, 3)
assert repr(value) == "Subclass((1, 2, 3))"
@pytest.mark.parametrize(
"indexer_cls",
[indexing.BasicIndexer, indexing.OuterIndexer, indexing.VectorizedIndexer],
)
def test_invalid_for_all(indexer_cls):
with pytest.raises(TypeError):
indexer_cls(None)
with pytest.raises(TypeError):
indexer_cls(([],))
with pytest.raises(TypeError):
indexer_cls((None,))
with pytest.raises(TypeError):
indexer_cls(("foo",))
with pytest.raises(TypeError):
indexer_cls((1.0,))
with pytest.raises(TypeError):
indexer_cls((slice("foo"),))
with pytest.raises(TypeError):
indexer_cls((np.array(["foo"]),))
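# Shared checks reused by the per-class tests below: each indexer class is
# expected to normalise keys to builtin ints, slices of ints, and int64 arrays.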
def check_integer(indexer_cls):
value = indexer_cls((1, np.uint64(2))).tuple
assert all(isinstance(v, int) for v in value)
assert value == (1, 2)
def check_slice(indexer_cls):
(value,) = indexer_cls((slice(1, None, np.int64(2)),)).tuple
assert value == slice(1, None, 2)
assert isinstance(value.step, int)
def check_array1d(indexer_cls):
(value,) = indexer_cls((np.arange(3, dtype=np.int32),)).tuple
assert value.dtype == np.int64
np.testing.assert_array_equal(value, [0, 1, 2])
def check_array2d(indexer_cls):
array = np.array([[1, 2], [3, 4]], dtype=np.int64)
(value,) = indexer_cls((array,)).tuple
assert value.dtype == np.int64
np.testing.assert_array_equal(value, array)
def test_basic_indexer():
check_integer(indexing.BasicIndexer)
check_slice(indexing.BasicIndexer)
with pytest.raises(TypeError):
check_array1d(indexing.BasicIndexer)
with pytest.raises(TypeError):
check_array2d(indexing.BasicIndexer)
def test_outer_indexer():
check_integer(indexing.OuterIndexer)
check_slice(indexing.OuterIndexer)
check_array1d(indexing.OuterIndexer)
with pytest.raises(TypeError):
check_array2d(indexing.OuterIndexer)
def test_vectorized_indexer():
with pytest.raises(TypeError):
check_integer(indexing.VectorizedIndexer)
check_slice(indexing.VectorizedIndexer)
check_array1d(indexing.VectorizedIndexer)
check_array2d(indexing.VectorizedIndexer)
with raises_regex(ValueError, "numbers of dimensions"):
indexing.VectorizedIndexer(
(np.array(1, dtype=np.int64), np.arange(5, dtype=np.int64))
)
class Test_vectorized_indexer:
@pytest.fixture(autouse=True)
def setup(self):
self.data = indexing.NumpyIndexingAdapter(np.random.randn(10, 12, 13))
self.indexers = [
np.array([[0, 3, 2]]),
np.array([[0, 3, 3], [4, 6, 7]]),
slice(2, -2, 2),
slice(2, -2, 3),
slice(None),
]
def test_arrayize_vectorized_indexer(self):
for i, j, k in itertools.product(self.indexers, repeat=3):
vindex = indexing.VectorizedIndexer((i, j, k))
vindex_array = indexing._arrayize_vectorized_indexer(
vindex, self.data.shape
)
np.testing.assert_array_equal(self.data[vindex], self.data[vindex_array])
actual = indexing._arrayize_vectorized_indexer(
indexing.VectorizedIndexer((slice(None),)), shape=(5,)
)
np.testing.assert_array_equal(actual.tuple, [np.arange(5)])
actual = indexing._arrayize_vectorized_indexer(
indexing.VectorizedIndexer((np.arange(5),) * 3), shape=(8, 10, 12)
)
expected = np.stack([np.arange(5)] * 3)
np.testing.assert_array_equal(np.stack(actual.tuple), expected)
actual = indexing._arrayize_vectorized_indexer(
indexing.VectorizedIndexer((np.arange(5), slice(None))), shape=(8, 10)
)
a, b = actual.tuple
np.testing.assert_array_equal(a, np.arange(5)[:, np.newaxis])
np.testing.assert_array_equal(b, np.arange(10)[np.newaxis, :])
actual = indexing._arrayize_vectorized_indexer(
indexing.VectorizedIndexer((slice(None), np.arange(5))), shape=(8, 10)
)
a, b = actual.tuple
np.testing.assert_array_equal(a, np.arange(8)[np.newaxis, :])
np.testing.assert_array_equal(b, np.arange(5)[:, np.newaxis])
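# Helper for the decompose tests below: build a representative indexer of the
# requested kind ('vectorized', 'outer', 'outer_scalar', ..., 'basic3') for a
# given array shape.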
def get_indexers(shape, mode):
if mode == "vectorized":
indexed_shape = (3, 4)
indexer = tuple(np.random.randint(0, s, size=indexed_shape) for s in shape)
return indexing.VectorizedIndexer(indexer)
elif mode == "outer":
indexer = tuple(np.random.randint(0, s, s + 2) for s in shape)
return indexing.OuterIndexer(indexer)
elif mode == "outer_scalar":
indexer = (np.random.randint(0, 3, 4), 0, slice(None, None, 2))
return indexing.OuterIndexer(indexer[: len(shape)])
elif mode == "outer_scalar2":
indexer = (np.random.randint(0, 3, 4), -2, slice(None, None, 2))
return indexing.OuterIndexer(indexer[: len(shape)])
elif mode == "outer1vec":
indexer = [slice(2, -3) for s in shape]
indexer[1] = np.random.randint(0, shape[1], shape[1] + 2)
return indexing.OuterIndexer(tuple(indexer))
elif mode == "basic": # basic indexer
indexer = [slice(2, -3) for s in shape]
indexer[0] = 3
return indexing.BasicIndexer(tuple(indexer))
elif mode == "basic1": # basic indexer
return indexing.BasicIndexer((3,))
elif mode == "basic2": # basic indexer
indexer = [0, 2, 4]
return indexing.BasicIndexer(tuple(indexer[: len(shape)]))
elif mode == "basic3": # basic indexer
indexer = [slice(None) for s in shape]
indexer[0] = slice(-2, 2, -2)
indexer[1] = slice(1, -1, 2)
return indexing.BasicIndexer(tuple(indexer[: len(shape)]))
@pytest.mark.parametrize("size", [100, 99])
@pytest.mark.parametrize(
"sl", [slice(1, -1, 1), slice(None, -1, 2), slice(-1, 1, -1), slice(-1, 1, -2)]
)
def test_decompose_slice(size, sl):
x = np.arange(size)
slice1, slice2 = indexing._decompose_slice(sl, size)
expected = x[sl]
actual = x[slice1][slice2]
assert_array_equal(expected, actual)
@pytest.mark.parametrize("shape", [(10, 5, 8), (10, 3)])
@pytest.mark.parametrize(
"indexer_mode",
[
"vectorized",
"outer",
"outer_scalar",
"outer_scalar2",
"outer1vec",
"basic",
"basic1",
"basic2",
"basic3",
],
)
@pytest.mark.parametrize(
"indexing_support",
[
indexing.IndexingSupport.BASIC,
indexing.IndexingSupport.OUTER,
indexing.IndexingSupport.OUTER_1VECTOR,
indexing.IndexingSupport.VECTORIZED,
],
)
def test_decompose_indexers(shape, indexer_mode, indexing_support):
data = np.random.randn(*shape)
indexer = get_indexers(shape, indexer_mode)
backend_ind, np_ind = indexing.decompose_indexer(indexer, shape, indexing_support)
expected = indexing.NumpyIndexingAdapter(data)[indexer]
array = indexing.NumpyIndexingAdapter(data)[backend_ind]
if len(np_ind.tuple) > 0:
array = indexing.NumpyIndexingAdapter(array)[np_ind]
np.testing.assert_array_equal(expected, array)
if not all(isinstance(k, indexing.integer_types) for k in np_ind.tuple):
combined_ind = indexing._combine_indexers(backend_ind, shape, np_ind)
array = indexing.NumpyIndexingAdapter(data)[combined_ind]
np.testing.assert_array_equal(expected, array)
def test_implicit_indexing_adapter():
array = np.arange(10, dtype=np.int64)
implicit = indexing.ImplicitToExplicitIndexingAdapter(
indexing.NumpyIndexingAdapter(array), indexing.BasicIndexer
)
np.testing.assert_array_equal(array, np.asarray(implicit))
np.testing.assert_array_equal(array, implicit[:])
def test_implicit_indexing_adapter_copy_on_write():
array = np.arange(10, dtype=np.int64)
implicit = indexing.ImplicitToExplicitIndexingAdapter(
indexing.CopyOnWriteArray(array)
)
assert isinstance(implicit[:], indexing.ImplicitToExplicitIndexingAdapter)
def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
def nonzero(x):
if isinstance(x, np.ndarray) and x.dtype.kind == "b":
x = x.nonzero()[0]
return x
original = np.random.rand(10, 20, 30)
v = Variable(["i", "j", "k"], original)
arr = ReturnItem()
# test orthogonally applied indexers
indexers = [
arr[:],
0,
-2,
arr[:3],
np.array([0, 1, 2, 3]),
np.array([0]),
np.arange(10) < 5,
]
for i, j, k in itertools.product(indexers, repeat=3):
if isinstance(j, np.ndarray) and j.dtype.kind == "b": # match size
j = np.arange(20) < 4
if isinstance(k, np.ndarray) and k.dtype.kind == "b":
k = np.arange(30) < 8
_, expected, new_order = v._broadcast_indexes_vectorized((i, j, k))
expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple]
if new_order:
old_order = range(len(new_order))
expected_data = np.moveaxis(expected_data, old_order, new_order)
outer_index = indexing.OuterIndexer((nonzero(i), nonzero(j), nonzero(k)))
actual = indexing._outer_to_numpy_indexer(outer_index, v.shape)
actual_data = v.data[actual]
np.testing.assert_array_equal(actual_data, expected_data)
def test_create_mask_outer_indexer():
indexer = indexing.OuterIndexer((np.array([0, -1, 2]),))
expected = np.array([False, True, False])
actual = indexing.create_mask(indexer, (5,))
np.testing.assert_array_equal(expected, actual)
indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2])))
expected = np.array(2 * [[False, True, False]])
actual = indexing.create_mask(indexer, (5, 5, 5))
np.testing.assert_array_equal(expected, actual)
def test_create_mask_vectorized_indexer():
indexer = indexing.VectorizedIndexer((np.array([0, -1, 2]), np.array([0, 1, -1])))
expected = np.array([False, True, True])
actual = indexing.create_mask(indexer, (5,))
np.testing.assert_array_equal(expected, actual)
indexer = indexing.VectorizedIndexer(
(np.array([0, -1, 2]), slice(None), np.array([0, 1, -1]))
)
expected = np.array([[False, True, True]] * 2).T
actual = indexing.create_mask(indexer, (5, 2))
np.testing.assert_array_equal(expected, actual)
def test_create_mask_basic_indexer():
indexer = indexing.BasicIndexer((-1,))
actual = indexing.create_mask(indexer, (3,))
np.testing.assert_array_equal(True, actual)
indexer = indexing.BasicIndexer((0,))
actual = indexing.create_mask(indexer, (3,))
np.testing.assert_array_equal(False, actual)
def test_create_mask_dask():
da = pytest.importorskip("dask.array")
indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2])))
expected = np.array(2 * [[False, True, False]])
actual = indexing.create_mask(
indexer, (5, 5, 5), da.empty((2, 3), chunks=((1, 1), (2, 1)))
)
assert actual.chunks == ((1, 1), (2, 1))
np.testing.assert_array_equal(expected, actual)
indexer = indexing.VectorizedIndexer(
(np.array([0, -1, 2]), slice(None), np.array([0, 1, -1]))
)
expected = np.array([[False, True, True]] * 2).T
actual = indexing.create_mask(
indexer, (5, 2), da.empty((3, 2), chunks=((3,), (2,)))
)
assert isinstance(actual, da.Array)
np.testing.assert_array_equal(expected, actual)
with pytest.raises(ValueError):
indexing.create_mask(indexer, (5, 2), da.empty((5,), chunks=(1,)))
def test_create_mask_error():
with raises_regex(TypeError, "unexpected key type"):
indexing.create_mask((1, 2), (3, 4))
@pytest.mark.parametrize(
"indices, expected",
[
(np.arange(5), np.arange(5)),
(np.array([0, -1, -1]), np.array([0, 0, 0])),
(np.array([-1, 1, -1]), np.array([1, 1, 1])),
(np.array([-1, -1, 2]), np.array([2, 2, 2])),
(np.array([-1]), np.array([0])),
(np.array([0, -1, 1, -1, -1]), np.array([0, 0, 1, 1, 1])),
(np.array([0, -1, -1, -1, 1]), np.array([0, 0, 0, 0, 1])),
],
)
def test_posify_mask_subindexer(indices, expected):
actual = indexing._posify_mask_subindexer(indices)
np.testing.assert_array_equal(expected, actual)
| apache-2.0 |
taylorhxu/pybrain | examples/rl/environments/shipsteer/shipbench_sde.py | 26 | 3454 | from __future__ import print_function
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with SPE on the ShipSteering Environment
#
# Requirements:
# pybrain (tested on rev. 1195, ship env rev. 1202)
# Synopsis:
# shipbenchm.py [<True|False> [logfile]]
# (first argument is graphics flag)
#########################################################################
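# Example invocation (a sketch; the first argument is eval'd as the graphics
# flag and the optional second argument is the logfile):
#   python shipbench_sde.py False mylog.dat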
__author__ = "Martin Felder, Thomas Rueckstiess"
__version__ = '$Id$'
#---
# default backend GtkAgg does not plot properly on Ubuntu 8.04
import matplotlib
matplotlib.use('TkAgg')
#---
from pybrain.rl.environments.shipsteer import ShipSteeringEnvironment
from pybrain.rl.environments.shipsteer import GoNorthwardTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC
from pybrain.rl.experiments.episodic import EpisodicExperiment
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.plotting import MultilinePlotter
from pylab import figure, ion
from scipy import mean
import sys
if len(sys.argv) > 1:
useGraphics = eval(sys.argv[1])
else:
useGraphics = False
# create task
env=ShipSteeringEnvironment()
maxsteps = 500
task = GoNorthwardTask(env=env, maxsteps = maxsteps)
# task.env.setRenderer( CartPoleRenderer())
# create controller network
#net = buildNetwork(task.outdim, 7, task.indim, bias=True, outputbias=False)
net = buildNetwork(task.outdim, task.indim, bias=False)
#net.initParams(0.0)
# create agent
learner = ENAC()
learner.gd.rprop = True
# only relevant for RP
learner.gd.deltamin = 0.0001
#agent.learner.gd.deltanull = 0.05
# only relevant for BP
learner.gd.alpha = 0.01
learner.gd.momentum = 0.9
agent = LearningAgent(net, learner)
agent.actaspg = False
# create experiment
experiment = EpisodicExperiment(task, agent)
# print weights at beginning
print(agent.module.params)
rewards = []
if useGraphics:
figure()
ion()
pl = MultilinePlotter(autoscale=1.2, xlim=[0, 50], ylim=[0, 1])
pl.setLineStyle(linewidth=2)
# queued version
# experiment._fillQueue(30)
# while True:
# experiment._stepQueueLoop()
# # rewards.append(mean(agent.history.getSumOverSequences('reward')))
# print agent.module.getParameters(),
# print mean(agent.history.getSumOverSequences('reward'))
# clf()
# plot(rewards)
# episodic version
x = 0
batch = 30 #number of samples per gradient estimate (was: 20; more here due to stochastic setting)
while x<5000:
#while True:
experiment.doEpisodes(batch)
x += batch
reward = mean(agent.history.getSumOverSequences('reward'))*task.rewardscale
if useGraphics:
pl.addData(0,x,reward)
print(agent.module.params)
print(reward)
#if reward > 3:
# pass
agent.learn()
agent.reset()
if useGraphics:
pl.update()
if len(sys.argv) > 2:
    agent.history.saveToFile(sys.argv[2], protocol=-1, arraysonly=True)  # logfile is the second CLI argument
if useGraphics:
pl.show( popup = True)
#To view what the simulation is doing at the moment set the environment with True, go to pybrain/rl/environments/ode/ and start viewer.py (python-openGL musst be installed, see PyBrain documentation)
## performance:
## experiment.doEpisodes(5) * 100 without weave:
## real 2m39.683s
## user 2m33.358s
## sys 0m5.960s
## experiment.doEpisodes(5) * 100 with weave:
##real 2m41.275s
##user 2m35.310s
##sys 0m5.192s
##
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/mpl_toolkits/axes_grid1/inset_locator.py | 6 | 9604 | from matplotlib.offsetbox import AnchoredOffsetbox
#from matplotlib.transforms import IdentityTransform
import matplotlib.transforms as mtrans
#from matplotlib.axes import Axes
from mpl_axes import Axes
from matplotlib.transforms import Bbox, TransformedBbox, IdentityTransform
from matplotlib.patches import Patch
from matplotlib.path import Path
from matplotlib.patches import Rectangle
class InsetPosition(object):
def __init__(self, parent, lbwh):
self.parent = parent
self.lbwh = lbwh # position of the inset axes in the normalized coordinate of the parent axes
def __call__(self, ax, renderer):
bbox_parent = self.parent.get_position(original=False)
trans = mtrans.BboxTransformTo(bbox_parent)
bbox_inset = mtrans.Bbox.from_bounds(*self.lbwh)
bb = mtrans.TransformedBbox(bbox_inset, trans)
return bb
class AnchoredLocatorBase(AnchoredOffsetbox):
def __init__(self, bbox_to_anchor, offsetbox, loc,
borderpad=0.5, bbox_transform=None):
super(AnchoredLocatorBase, self).__init__(loc,
pad=0., child=None,
borderpad=borderpad,
bbox_to_anchor=bbox_to_anchor,
bbox_transform=bbox_transform)
def draw(self, renderer):
raise RuntimeError("No draw method should be called")
def __call__(self, ax, renderer):
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, 0, 0, renderer)
bbox_canvas = mtrans.Bbox.from_bounds(px, py, width, height)
tr = ax.figure.transFigure.inverted()
bb = mtrans.TransformedBbox(bbox_canvas, tr)
return bb
import axes_size as Size
class AnchoredSizeLocator(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
self.axes = None
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
super(AnchoredSizeLocator, self).__init__(bbox_to_anchor, None, loc,
borderpad=borderpad,
bbox_transform=bbox_transform)
def get_extent(self, renderer):
x, y, w, h = self.get_bbox_to_anchor().bounds
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = w*r + a*dpi
r, a = self.y_size.get_size(renderer)
height = h*r + a*dpi
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return width+2*pad, height+2*pad, xd+pad, yd+pad
def __call__(self, ax, renderer):
self.axes = ax
return super(AnchoredSizeLocator, self).__call__(ax, renderer)
class AnchoredZoomLocator(AnchoredLocatorBase):
def __init__(self, parent_axes, zoom, loc,
borderpad=0.5,
bbox_to_anchor=None,
bbox_transform=None):
self.parent_axes = parent_axes
self.zoom = zoom
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
super(AnchoredZoomLocator, self).__init__(bbox_to_anchor, None, loc,
borderpad=borderpad,
bbox_transform=bbox_transform)
self.axes = None
def get_extent(self, renderer):
bb = mtrans.TransformedBbox(self.axes.viewLim, self.parent_axes.transData)
x, y, w, h = bb.bounds
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w*self.zoom+2*pad, h*self.zoom+2*pad, xd+pad, yd+pad
def __call__(self, ax, renderer):
self.axes = ax
return super(AnchoredZoomLocator, self).__call__(ax, renderer)
class BboxPatch(Patch):
def __init__(self, bbox, **kwargs):
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox = bbox
def get_path(self):
x0, y0, x1, y1 = self.bbox.extents
verts = [(x0, y0),
(x1, y0),
(x1, y1),
(x0, y1),
(x0, y0),
(0,0)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY]
return Path(verts, codes)
class BboxConnector(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
x0, y0, x1, y1 = bbox.extents
if loc==1:
return x1, y1
elif loc==2:
return x0, y1
elif loc==3:
return x0, y0
elif loc==4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
if isinstance(bbox1, Rectangle):
transform = bbox1.get_transform()
bbox1 = Bbox.from_bounds(0, 0, 1, 1)
bbox1 = TransformedBbox(bbox1, transform)
if isinstance(bbox2, Rectangle):
transform = bbox2.get_transform()
bbox2 = Bbox.from_bounds(0, 0, 1, 1)
bbox2 = TransformedBbox(bbox2, transform)
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
verts = [[x1, y1], [x2,y2]]
#Path()
codes = [Path.MOVETO, Path.LINETO]
return Path(verts, codes)
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
Connect *bbox1* and *bbox2* with a line drawn between the corners given by *loc1* and *loc2*.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
class BboxConnectorPatch(BboxConnector):
def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
if "transform" in kwargs:
raise ValueError("transform should not be set")
BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
self.loc1b = loc1b
self.loc2b = loc2b
def get_path(self):
path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
path2 = self.connect_bbox(self.bbox2, self.bbox1, self.loc2b, self.loc1b)
path_merged = list(path1.vertices) + list (path2.vertices) + [path1.vertices[0]]
return Path(path_merged)
def _add_inset_axes(parent_axes, inset_axes):
parent_axes.figure.add_axes(inset_axes)
inset_axes.set_navigate(False)
def inset_axes(parent_axes, width, height, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
**kwargs):
if axes_class is None:
axes_class = Axes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
axes_locator = AnchoredSizeLocator(bbox_to_anchor,
width, height,
loc=loc,
bbox_transform=bbox_transform,
**kwargs)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
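# --- Added illustrative sketch (not part of the original module) ---
# inset_axes above sizes the new axes through axes_size.from_any, so width and
# height may be floats (inches) or percentage strings of the anchor bbox. A
# minimal sketch with arbitrary example values; _demo_inset_axes is a
# placeholder name.
def _demo_inset_axes():
    import matplotlib.pyplot as plt
    fig, parent_ax = plt.subplots()
    # an inset covering 30% of the parent's width and 40% of its height,
    # anchored at loc=1 (upper right)
    return inset_axes(parent_ax, width="30%", height="40%", loc=1)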
def zoomed_inset_axes(parent_axes, zoom, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
**kwargs):
if axes_class is None:
axes_class = Axes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform,
**kwargs)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
pp = BboxPatch(rect, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
return pp, p1, p2
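# --- Added illustrative sketch (not part of the original module) ---
# zoomed_inset_axes and mark_inset above are typically used together: the
# inset shares the parent's data coordinates scaled by `zoom`, and mark_inset
# draws the zoomed region plus connector lines on the parent. A minimal
# sketch; the data, zoom factor and corner codes are arbitrary examples.
def _demo_zoomed_inset():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, parent_ax = plt.subplots()
    x = np.linspace(0, 1, 200)
    parent_ax.plot(x, np.sin(20 * x))
    axins = zoomed_inset_axes(parent_ax, zoom=3, loc=4)  # loc=4: lower right
    axins.plot(x, np.sin(20 * x))
    axins.set_xlim(0.4, 0.5)   # zoom in on a small data window
    axins.set_ylim(-1.0, 1.0)
    mark_inset(parent_ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
    return fig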
| unlicense |
DGrady/pandas | pandas/core/algorithms.py | 2 | 51643 | """
Generic data algorithms. This module is experimental at the moment and not
intended for public consumption
"""
from __future__ import division
from warnings import warn, catch_warnings
import numpy as np
from pandas import compat, _np_version_under1p8
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.generic import (
ABCSeries, ABCIndex,
ABCIndexClass, ABCCategorical)
from pandas.core.dtypes.common import (
is_unsigned_integer_dtype, is_signed_integer_dtype,
is_integer_dtype, is_complex_dtype,
is_object_dtype,
is_categorical_dtype, is_sparse,
is_period_dtype,
is_numeric_dtype, is_float_dtype,
is_bool_dtype, needs_i8_conversion,
is_categorical, is_datetimetz,
is_datetime64_any_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_interval_dtype,
is_scalar, is_list_like,
_ensure_platform_int, _ensure_object,
_ensure_float64, _ensure_uint64,
_ensure_int64)
from pandas.compat.numpy import _np_version_under1p10
from pandas.core.dtypes.missing import isna
from pandas.core import common as com
from pandas._libs import algos, lib, hashtable as htable
from pandas._libs.tslib import iNaT
# --------------- #
# dtype access #
# --------------- #
def _ensure_data(values, dtype=None):
"""
routine to ensure that our data is of the correct
input dtype for lower-level routines
This will coerce:
- ints -> int64
- uint -> uint64
- bool -> uint64 (TODO this should be uint8)
- datetimelike -> i8
- datetime64tz -> i8 (in local tz)
- categorical -> codes
Parameters
----------
values : array-like
dtype : pandas_dtype, optional
coerce to this dtype
Returns
-------
(ndarray, pandas_dtype, algo dtype as a string)
"""
# we check some simple dtypes first
try:
if is_object_dtype(dtype):
return _ensure_object(np.asarray(values)), 'object', 'object'
if is_bool_dtype(values) or is_bool_dtype(dtype):
# we are actually coercing to uint64
# until our algos support uint8 directly (see TODO)
return np.asarray(values).astype('uint64'), 'bool', 'uint64'
elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype):
return _ensure_int64(values), 'int64', 'int64'
elif (is_unsigned_integer_dtype(values) or
is_unsigned_integer_dtype(dtype)):
return _ensure_uint64(values), 'uint64', 'uint64'
elif is_float_dtype(values) or is_float_dtype(dtype):
return _ensure_float64(values), 'float64', 'float64'
elif is_object_dtype(values) and dtype is None:
return _ensure_object(np.asarray(values)), 'object', 'object'
elif is_complex_dtype(values) or is_complex_dtype(dtype):
# ignore the fact that we are casting to float
# which discards complex parts
with catch_warnings(record=True):
values = _ensure_float64(values)
return values, 'float64', 'float64'
except (TypeError, ValueError):
# if we are trying to coerce to a dtype
# and it is incompat this will fall thru to here
return _ensure_object(values), 'object', 'object'
# datetimelike
if (needs_i8_conversion(values) or
is_period_dtype(dtype) or
is_datetime64_any_dtype(dtype) or
is_timedelta64_dtype(dtype)):
if is_period_dtype(values) or is_period_dtype(dtype):
from pandas import PeriodIndex
values = PeriodIndex(values)
dtype = values.dtype
elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype):
from pandas import TimedeltaIndex
values = TimedeltaIndex(values)
dtype = values.dtype
else:
# Datetime
from pandas import DatetimeIndex
values = DatetimeIndex(values)
dtype = values.dtype
return values.asi8, dtype, 'int64'
elif (is_categorical_dtype(values) and
(is_categorical_dtype(dtype) or dtype is None)):
values = getattr(values, 'values', values)
values = values.codes
dtype = 'category'
# we are actually coercing to int64
# until our algos support int* directly (not all do)
values = _ensure_int64(values)
return values, dtype, 'int64'
# we have failed, return object
values = np.asarray(values)
return _ensure_object(values), 'object', 'object'
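# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of the coercions documented in _ensure_data above: ints
# become int64, bools become uint64, datetimelikes are reduced to their i8
# view and categoricals to their codes. Outputs are only indicative and may
# differ slightly across versions; _demo_ensure_data is a placeholder name.
def _demo_ensure_data():
    import numpy as np
    import pandas as pd
    examples = [np.array([1, 2, 3], dtype='int32'),
                np.array([True, False]),
                pd.Series(pd.date_range('2000-01-01', periods=2)),
                pd.Categorical(['a', 'b', 'a'])]
    for values in examples:
        coerced, pandas_dtype, algo_dtype = _ensure_data(values)
        print("%s -> %s / %s / %s" % (type(values).__name__,
                                      coerced.dtype, pandas_dtype, algo_dtype))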
def _reconstruct_data(values, dtype, original):
"""
reverse of _ensure_data
Parameters
----------
values : ndarray
dtype : pandas_dtype
original : ndarray-like
Returns
-------
Index for extension types, otherwise ndarray casted to dtype
"""
from pandas import Index
if is_categorical_dtype(dtype):
pass
elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype):
values = Index(original)._shallow_copy(values, name=None)
elif is_bool_dtype(dtype):
values = values.astype(dtype)
# we only support object dtypes bool Index
if isinstance(original, Index):
values = values.astype(object)
elif dtype is not None:
values = values.astype(dtype)
return values
def _ensure_arraylike(values):
"""
ensure that we are arraylike if not already
"""
if not isinstance(values, (np.ndarray, ABCCategorical,
ABCIndexClass, ABCSeries)):
inferred = lib.infer_dtype(values)
if inferred in ['mixed', 'string', 'unicode']:
if isinstance(values, tuple):
values = list(values)
values = lib.list_to_object_array(values)
else:
values = np.asarray(values)
return values
_hashtables = {
'float64': (htable.Float64HashTable, htable.Float64Vector),
'uint64': (htable.UInt64HashTable, htable.UInt64Vector),
'int64': (htable.Int64HashTable, htable.Int64Vector),
'string': (htable.StringHashTable, htable.ObjectVector),
'object': (htable.PyObjectHashTable, htable.ObjectVector)
}
def _get_hashtable_algo(values):
"""
Parameters
----------
values : arraylike
Returns
-------
tuples(hashtable class,
vector class,
values,
dtype,
ndtype)
"""
values, dtype, ndtype = _ensure_data(values)
if ndtype == 'object':
# it's cheaper to use a String Hash Table than Object
if lib.infer_dtype(values) in ['string']:
ndtype = 'string'
else:
ndtype = 'object'
htable, table = _hashtables[ndtype]
return (htable, table, values, dtype, ndtype)
def _get_data_algo(values, func_map):
if is_categorical_dtype(values):
values = values._values_for_rank()
values, dtype, ndtype = _ensure_data(values)
if ndtype == 'object':
# it's cheaper to use a String Hash Table than Object
if lib.infer_dtype(values) in ['string']:
ndtype = 'string'
f = func_map.get(ndtype, func_map['object'])
return f, values
# --------------- #
# top-level algos #
# --------------- #
def match(to_match, values, na_sentinel=-1):
"""
Compute locations of to_match into values
Parameters
----------
to_match : array-like
values to find positions of
values : array-like
Unique set of values
na_sentinel : int, default -1
Value to mark "not found"
Examples
--------
Returns
-------
match : ndarray of integers
"""
values = com._asarray_tuplesafe(values)
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
to_match, _, _ = _ensure_data(to_match, dtype)
table = htable(min(len(to_match), 1000000))
table.map_locations(values)
result = table.lookup(to_match)
if na_sentinel != -1:
# replace but return a numpy array
# use a Series because it handles dtype conversions properly
from pandas import Series
result = Series(result.ravel()).replace(-1, na_sentinel).values.\
reshape(result.shape)
return result
def unique(values):
"""
Hash table-based unique. Uniques are returned in order
of appearance. This does NOT sort.
Significantly faster than numpy.unique. Includes NA values.
Parameters
----------
values : 1d array-like
Returns
-------
unique values.
- If the input is an Index, the return is an Index
- If the input is a Categorical dtype, the return is a Categorical
- If the input is a Series/ndarray, the return will be an ndarray
Examples
--------
>>> pd.unique(pd.Series([2, 1, 3, 3]))
array([2, 1, 3])
>>> pd.unique(pd.Series([2] + [1] * 5))
array([2, 1])
>>> pd.unique(Series([pd.Timestamp('20160101'),
... pd.Timestamp('20160101')]))
array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]')
>>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')],
dtype=object)
>>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'),
... pd.Timestamp('20160101', tz='US/Eastern')]))
DatetimeIndex(['2016-01-01 00:00:00-05:00'],
... dtype='datetime64[ns, US/Eastern]', freq=None)
>>> pd.unique(list('baabc'))
array(['b', 'a', 'c'], dtype=object)
An unordered Categorical will return categories in the
order of appearance.
>>> pd.unique(Series(pd.Categorical(list('baabc'))))
[b, a, c]
Categories (3, object): [b, a, c]
>>> pd.unique(Series(pd.Categorical(list('baabc'),
... categories=list('abc'))))
[b, a, c]
Categories (3, object): [b, a, c]
An ordered Categorical preserves the category ordering.
>>> pd.unique(Series(pd.Categorical(list('baabc'),
... categories=list('abc'),
... ordered=True)))
[b, a, c]
Categories (3, object): [a < b < c]
An array of tuples
>>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
See Also
--------
pandas.Index.unique
pandas.Series.unique
"""
values = _ensure_arraylike(values)
# categorical is a fast-path
# this will coerce Categorical, CategoricalIndex,
# and category dtypes Series to same return of Category
if is_categorical_dtype(values):
values = getattr(values, 'values', values)
return values.unique()
original = values
htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
table = htable(len(values))
uniques = table.unique(values)
uniques = _reconstruct_data(uniques, dtype, original)
if isinstance(original, ABCSeries) and is_datetime64tz_dtype(dtype):
# we are special casing datetime64tz_dtype
# to return an object array of tz-aware Timestamps
# TODO: it must return DatetimeArray with tz in pandas 2.0
uniques = uniques.asobject.values
return uniques
unique1d = unique
def isin(comps, values):
"""
Compute the isin boolean array
Parameters
----------
comps: array-like
values: array-like
Returns
-------
boolean array same length as comps
"""
if not is_list_like(comps):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(comps).__name__))
if not is_list_like(values):
raise TypeError("only list-like objects are allowed to be passed"
" to isin(), you passed a "
"[{0}]".format(type(values).__name__))
if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
values = lib.list_to_object_array(list(values))
comps, dtype, _ = _ensure_data(comps)
values, _, _ = _ensure_data(values, dtype=dtype)
# GH11232
# work-around for numpy < 1.8 and comparisons on py3
# faster for larger cases to use np.in1d
f = lambda x, y: htable.ismember_object(x, values)
# GH16012
# Ensure np.in1d doesn't get object types or it *may* throw an exception
if ((_np_version_under1p8 and compat.PY3) or len(comps) > 1000000 and
not is_object_dtype(comps)):
f = lambda x, y: np.in1d(x, y)
elif is_integer_dtype(comps):
try:
values = values.astype('int64', copy=False)
comps = comps.astype('int64', copy=False)
f = lambda x, y: htable.ismember_int64(x, y)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
elif is_float_dtype(comps):
try:
values = values.astype('float64', copy=False)
comps = comps.astype('float64', copy=False)
checknull = isna(values).any()
f = lambda x, y: htable.ismember_float64(x, y, checknull)
except (TypeError, ValueError):
values = values.astype(object)
comps = comps.astype(object)
return f(comps, values)
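# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of isin above; the same dtype-based dispatch backs the
# public Series.isin. Example values are arbitrary.
def _demo_isin():
    import numpy as np
    comps = np.array([1, 2, 3, 4])
    mask = isin(comps, [2, 4])   # -> array([False,  True, False,  True])
    return comps[mask]           # -> array([2, 4])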
def factorize(values, sort=False, order=None, na_sentinel=-1, size_hint=None):
"""
Encode input values as an enumerated type or categorical variable
Parameters
----------
values : ndarray (1-d)
Sequence
sort : boolean, default False
Sort by values
na_sentinel : int, default -1
Value to mark "not found"
size_hint : hint to the hashtable sizer
Returns
-------
labels : the indexer to the original array
uniques : ndarray (1-d) or Index
the unique values. Index is returned when passed values is Index or
Series
note: an array of Periods will ignore sort as it returns an always sorted
PeriodIndex
"""
values = _ensure_arraylike(values)
original = values
values, dtype, _ = _ensure_data(values)
(hash_klass, vec_klass), values = _get_data_algo(values, _hashtables)
table = hash_klass(size_hint or len(values))
uniques = vec_klass()
check_nulls = not is_integer_dtype(original)
labels = table.get_labels(values, uniques, 0, na_sentinel, check_nulls)
labels = _ensure_platform_int(labels)
uniques = uniques.to_array()
if sort and len(uniques) > 0:
from pandas.core.sorting import safe_sort
uniques, labels = safe_sort(uniques, labels, na_sentinel=na_sentinel,
assume_unique=True)
uniques = _reconstruct_data(uniques, dtype, original)
# return original tenor
if isinstance(original, ABCIndexClass):
uniques = original._shallow_copy(uniques, name=None)
elif isinstance(original, ABCSeries):
from pandas import Index
uniques = Index(uniques)
return labels, uniques
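# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of factorize above: labels index into uniques, and
# sort=True reorders the uniques while remapping the labels accordingly.
def _demo_factorize():
    labels, uniques = factorize(['b', 'a', 'b', 'c'])
    # labels   -> array([0, 1, 0, 2]); uniques   -> array(['b', 'a', 'c'], dtype=object)
    labels_s, uniques_s = factorize(['b', 'a', 'b', 'c'], sort=True)
    # labels_s -> array([1, 0, 1, 2]); uniques_s -> array(['a', 'b', 'c'], dtype=object)
    return labels, uniques, labels_s, uniques_s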
def value_counts(values, sort=True, ascending=False, normalize=False,
bins=None, dropna=True):
"""
Compute a histogram of the counts of non-null values.
Parameters
----------
values : ndarray (1-d)
sort : boolean, default True
Sort by values
ascending : boolean, default False
Sort in ascending order
normalize: boolean, default False
If True then compute a relative histogram
bins : integer, optional
Rather than count values, group them into half-open bins,
convenience for pd.cut, only works with numeric data
dropna : boolean, default True
Don't include counts of NaN
Returns
-------
value_counts : Series
"""
from pandas.core.series import Series, Index
name = getattr(values, 'name', None)
if bins is not None:
try:
from pandas.core.reshape.tile import cut
values = Series(values)
ii = cut(values, bins, include_lowest=True)
except TypeError:
raise TypeError("bins argument only works with numeric data.")
# count, remove nulls (from the index), and use the bins
result = ii.value_counts(dropna=dropna)
result = result[result.index.notna()]
result.index = result.index.astype('interval')
result = result.sort_index()
# if we are dropna and we have NO values
if dropna and (result.values == 0).all():
result = result.iloc[0:0]
# normalizing is by len of all (regardless of dropna)
counts = np.array([len(ii)])
else:
if is_categorical_dtype(values) or is_sparse(values):
# handle Categorical and sparse,
result = Series(values).values.value_counts(dropna=dropna)
result.name = name
counts = result.values
else:
keys, counts = _value_counts_arraylike(values, dropna)
if not isinstance(keys, Index):
keys = Index(keys)
result = Series(counts, index=keys, name=name)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / float(counts.sum())
return result
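# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of value_counts above: results are sorted by count by
# default, and normalize=True divides by the total number of values.
def _demo_value_counts():
    counts = value_counts(['a', 'b', 'a', 'a'])        # a -> 3, b -> 1
    freqs = value_counts(['a', 'b', 'a', 'a'],
                         normalize=True)               # a -> 0.75, b -> 0.25
    return counts, freqs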
def _value_counts_arraylike(values, dropna):
"""
Parameters
----------
values : arraylike
dropna : boolean
Returns
-------
(uniques, counts)
"""
values = _ensure_arraylike(values)
original = values
values, dtype, ndtype = _ensure_data(values)
if needs_i8_conversion(dtype):
# i8
keys, counts = htable.value_count_int64(values, dropna)
if dropna:
msk = keys != iNaT
keys, counts = keys[msk], counts[msk]
else:
# ndarray like
# TODO: handle uint8
f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype))
keys, counts = f(values, dropna)
mask = isna(values)
if not dropna and mask.any():
if not isna(keys).any():
keys = np.insert(keys, 0, np.NaN)
counts = np.insert(counts, 0, mask.sum())
keys = _reconstruct_data(keys, original.dtype, original)
return keys, counts
def duplicated(values, keep='first'):
"""
Return boolean ndarray denoting duplicate values.
.. versionadded:: 0.19.0
Parameters
----------
values : ndarray-like
Array over which to check for duplicate values.
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last
occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : ndarray
"""
values, dtype, ndtype = _ensure_data(values)
f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype))
return f(values, keep=keep)
def mode(values):
"""
Returns the mode(s) of an array.
Parameters
----------
values : array-like
Array over which to compute the mode.
Returns
-------
mode : Series
"""
from pandas import Series
values = _ensure_arraylike(values)
original = values
# categorical is a fast-path
if is_categorical_dtype(values):
if isinstance(values, Series):
return Series(values.values.mode(), name=values.name)
return values.mode()
values, dtype, ndtype = _ensure_data(values)
# TODO: this should support float64
if ndtype not in ['int64', 'uint64', 'object']:
ndtype = 'object'
values = _ensure_object(values)
f = getattr(htable, "mode_{dtype}".format(dtype=ndtype))
result = f(values)
try:
result = np.sort(result)
except TypeError as e:
warn("Unable to sort modes: %s" % e)
result = _reconstruct_data(result, original.dtype, original)
return Series(result)
def rank(values, axis=0, method='average', na_option='keep',
ascending=True, pct=False):
"""
Rank the values along a given axis.
Parameters
----------
values : array-like
Array whose values will be ranked. The number of dimensions in this
array must not exceed 2.
axis : int, default 0
Axis over which to perform rankings.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
The method by which ties are broken during the ranking.
na_option : {'keep', 'top'}, default 'keep'
The method by which NaNs are placed in the ranking.
- ``keep``: rank each NaN value with a NaN ranking
- ``top``: replace each NaN with either +/- inf so that they
are ranked at the top
ascending : boolean, default True
Whether or not the elements should be ranked in ascending order.
pct : boolean, default False
Whether or not to display the returned rankings in integer form
(e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
"""
if values.ndim == 1:
f, values = _get_data_algo(values, _rank1d_functions)
ranks = f(values, ties_method=method, ascending=ascending,
na_option=na_option, pct=pct)
elif values.ndim == 2:
f, values = _get_data_algo(values, _rank2d_functions)
ranks = f(values, axis=axis, ties_method=method,
ascending=ascending, na_option=na_option, pct=pct)
else:
raise TypeError("Array with ndim > 2 are not supported.")
return ranks
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None):
"""
Perform array addition that checks for underflow and overflow.
Performs the addition of an int64 array and an int64 integer (or array)
but checks that they do not result in overflow first. For elements that
are indicated to be NaN, whether or not there is overflow for that element
is automatically ignored.
Parameters
----------
arr : array addend.
b : array or scalar addend.
arr_mask : boolean array or None
array indicating which elements to exclude from checking
b_mask : boolean array or boolean or None
array or scalar indicating which element(s) to exclude from checking
Returns
-------
sum : An array for elements x + b for each element x in arr if b is
a scalar or an array for elements x + y for each element pair
(x, y) in (arr, b).
Raises
------
OverflowError if any x + y exceeds the maximum or minimum int64 value.
"""
def _broadcast(arr_or_scalar, shape):
"""
Helper function to broadcast arrays / scalars to the desired shape.
"""
if _np_version_under1p10:
if lib.isscalar(arr_or_scalar):
out = np.empty(shape)
out.fill(arr_or_scalar)
else:
out = arr_or_scalar
else:
out = np.broadcast_to(arr_or_scalar, shape)
return out
# For performance reasons, we broadcast 'b' to the new array 'b2'
# so that it has the same size as 'arr'.
b2 = _broadcast(b, arr.shape)
if b_mask is not None:
# We do the same broadcasting for b_mask as well.
b2_mask = _broadcast(b_mask, arr.shape)
else:
b2_mask = None
# For elements that are NaN, regardless of their value, we should
# ignore whether they overflow or not when doing the checked add.
if arr_mask is not None and b2_mask is not None:
not_nan = np.logical_not(arr_mask | b2_mask)
elif arr_mask is not None:
not_nan = np.logical_not(arr_mask)
elif b_mask is not None:
not_nan = np.logical_not(b2_mask)
else:
not_nan = np.empty(arr.shape, dtype=bool)
not_nan.fill(True)
# gh-14324: For each element in 'arr' and its corresponding element
# in 'b2', we check the sign of the element in 'b2'. If it is positive,
# we then check whether its sum with the element in 'arr' exceeds
# np.iinfo(np.int64).max. If so, we have an overflow error. If it
# it is negative, we then check whether its sum with the element in
# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow
# error as well.
mask1 = b2 > 0
mask2 = b2 < 0
if not mask1.any():
to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any()
elif not mask2.any():
to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any()
else:
to_raise = (((np.iinfo(np.int64).max -
b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or
((np.iinfo(np.int64).min -
b2[mask2] > arr[mask2]) & not_nan[mask2]).any())
if to_raise:
raise OverflowError("Overflow in int64 addition")
return arr + b
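# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of the overflow check in checked_add_with_arr above: a sum
# that stays inside the int64 range is returned unchanged, while one that
# would wrap around raises OverflowError instead of silently wrapping.
def _demo_checked_add():
    import numpy as np
    near_max = np.array([np.iinfo(np.int64).max - 1], dtype=np.int64)
    ok = checked_add_with_arr(near_max, 1)   # exactly reaches the int64 max
    overflowed = False
    try:
        checked_add_with_arr(near_max, 2)    # would exceed the int64 max
    except OverflowError:
        overflowed = True
    return ok, overflowed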
_rank1d_functions = {
'float64': algos.rank_1d_float64,
'int64': algos.rank_1d_int64,
'uint64': algos.rank_1d_uint64,
'object': algos.rank_1d_object
}
_rank2d_functions = {
'float64': algos.rank_2d_float64,
'int64': algos.rank_2d_int64,
'uint64': algos.rank_2d_uint64,
'object': algos.rank_2d_object
}
def quantile(x, q, interpolation_method='fraction'):
"""
Compute sample quantile or quantiles of the input array. For example, q=0.5
computes the median.
The `interpolation_method` parameter supports three values, namely
`fraction` (default), `lower` and `higher`. Interpolation is done only
if the desired quantile lies between two data points `i` and `j`. For
`fraction`, the result is an interpolated value between `i` and `j`;
for `lower`, the result is `i`, for `higher` the result is `j`.
Parameters
----------
x : ndarray
Values from which to extract score.
q : scalar or array
Percentile at which to extract score.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
- fraction: `i + (j - i)*fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
- lower: `i`.
- higher: `j`.
Returns
-------
score : float
Score at percentile.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
x = np.asarray(x)
mask = isna(x)
x = x[~mask]
values = np.sort(x)
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a) * fraction
def _get_score(at):
if len(values) == 0:
return np.nan
idx = at * (len(values) - 1)
if idx % 1 == 0:
score = values[int(idx)]
else:
if interpolation_method == 'fraction':
score = _interpolate(values[int(idx)], values[int(idx) + 1],
idx % 1)
elif interpolation_method == 'lower':
score = values[np.floor(idx)]
elif interpolation_method == 'higher':
score = values[np.ceil(idx)]
else:
raise ValueError("interpolation_method can only be 'fraction' "
", 'lower' or 'higher'")
return score
if is_scalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
return algos.arrmap_float64(q, _get_score)
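# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of quantile above with the default 'fraction'
# interpolation: the requested quantile falls between two order statistics
# and is linearly interpolated between them.
def _demo_quantile():
    import numpy as np
    x = np.array([1., 2., 3., 4.])
    q50 = quantile(x, 0.5)                # -> 2.5
    q25_75 = quantile(x, [0.25, 0.75])    # -> array([ 1.75,  3.25])
    return q50, q25_75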
# --------------- #
# select n #
# --------------- #
class SelectN(object):
def __init__(self, obj, n, keep):
self.obj = obj
self.n = n
self.keep = keep
if self.keep not in ('first', 'last'):
raise ValueError('keep must be either "first" or "last"')
def nlargest(self):
return self.compute('nlargest')
def nsmallest(self):
return self.compute('nsmallest')
@staticmethod
def is_valid_dtype_n_method(dtype):
"""
Helper function to determine if dtype is valid for
nsmallest/nlargest methods
"""
return ((is_numeric_dtype(dtype) and not is_complex_dtype(dtype)) or
needs_i8_conversion(dtype))
class SelectNSeries(SelectN):
"""
Implement n largest/smallest for Series
Parameters
----------
obj : Series
n : int
keep : {'first', 'last'}, default 'first'
Returns
-------
nordered : Series
"""
def compute(self, method):
n = self.n
dtype = self.obj.dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError("Cannot use method '{method}' with "
"dtype {dtype}".format(method=method,
dtype=dtype))
if n <= 0:
return self.obj[[]]
dropped = self.obj.dropna()
# slow method
if n >= len(self.obj):
reverse_it = (self.keep == 'last' or method == 'nlargest')
ascending = method == 'nsmallest'
slc = np.s_[::-1] if reverse_it else np.s_[:]
return dropped[slc].sort_values(ascending=ascending).head(n)
# fast method
arr, _, _ = _ensure_data(dropped.values)
if method == 'nlargest':
arr = -arr
if self.keep == 'last':
arr = arr[::-1]
narr = len(arr)
n = min(n, narr)
kth_val = algos.kth_smallest(arr.copy(), n - 1)
ns, = np.nonzero(arr <= kth_val)
inds = ns[arr[ns].argsort(kind='mergesort')][:n]
if self.keep == 'last':
# reverse indices
inds = narr - 1 - inds
return dropped.iloc[inds]
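# --- Added illustrative sketch (not part of the original module) ---
# The fast path in SelectNSeries.compute above only fully sorts the candidates
# at or below the n-th smallest value. A sketch of the same idea in plain
# numpy, with np.partition standing in for the Cython algos.kth_smallest
# helper; it returns the positions of the n smallest values in stable order.
def _nsmallest_positions_sketch(values, n):
    import numpy as np
    arr = np.asarray(values)
    kth_val = np.partition(arr, n - 1)[n - 1]          # n-th smallest value
    ns, = np.nonzero(arr <= kth_val)                   # candidate positions
    return ns[arr[ns].argsort(kind='mergesort')][:n]   # stable order, first n
# e.g. _nsmallest_positions_sketch([7, 1, 5, 1, 9], 2) -> array([1, 3])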
class SelectNFrame(SelectN):
"""
Implement n largest/smallest for DataFrame
Parameters
----------
obj : DataFrame
n : int
keep : {'first', 'last'}, default 'first'
columns : list or str
Returns
-------
nordered : DataFrame
"""
def __init__(self, obj, n, keep, columns):
super(SelectNFrame, self).__init__(obj, n, keep)
if not is_list_like(columns):
columns = [columns]
columns = list(columns)
self.columns = columns
def compute(self, method):
from pandas import Int64Index
n = self.n
frame = self.obj
columns = self.columns
for column in columns:
dtype = frame[column].dtype
if not self.is_valid_dtype_n_method(dtype):
raise TypeError((
"Column {column!r} has dtype {dtype}, cannot use method "
"{method!r} with this dtype"
).format(column=column, dtype=dtype, method=method))
def get_indexer(current_indexer, other_indexer):
"""Helper function to concat `current_indexer` and `other_indexer`
depending on `method`
"""
if method == 'nsmallest':
return current_indexer.append(other_indexer)
else:
return other_indexer.append(current_indexer)
# Below we save and reset the index in case index contains duplicates
original_index = frame.index
cur_frame = frame = frame.reset_index(drop=True)
cur_n = n
indexer = Int64Index([])
for i, column in enumerate(columns):
# For each column we apply method to cur_frame[column].
# If it is the last column in columns, or if the values
# returned are unique in frame[column] we save this index
# and break
# Otherwise we must save the index of the non duplicated values
# and set the next cur_frame to cur_frame filtered on all
# duplicated values (GH15297)
series = cur_frame[column]
values = getattr(series, method)(cur_n, keep=self.keep)
is_last_column = len(columns) - 1 == i
if is_last_column or values.nunique() == series.isin(values).sum():
# Last column in columns or values are unique in
# series => values
# is all that matters
indexer = get_indexer(indexer, values.index)
break
duplicated_filter = series.duplicated(keep=False)
duplicated = values[duplicated_filter]
non_duplicated = values[~duplicated_filter]
indexer = get_indexer(indexer, non_duplicated.index)
# Must set cur frame to include all duplicated values
# to consider for the next column, we also can reduce
# cur_n by the current length of the indexer
cur_frame = cur_frame[series.isin(duplicated)]
cur_n = n - len(indexer)
frame = frame.take(indexer)
# Restore the index on frame
frame.index = original_index.take(indexer)
return frame
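# --- Added illustrative sketch (not part of the original module) ---
# SelectNFrame.compute above backs DataFrame.nlargest/nsmallest with a list of
# columns: ties in the first column are broken by the next one. A minimal
# usage sketch with arbitrary example data.
def _demo_frame_nlargest():
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 2, 3], 'b': [40, 20, 30, 10]})
    # the two largest rows by 'a'; the tie at a == 2 is broken by 'b',
    # so the result is the a=3 row followed by the a=2 row with b=30
    return df.nlargest(2, ['a', 'b'])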
# ---- #
# take #
# ---- #
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_object(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'): _view_wrapper(algos.take_1d_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_1d_bool_object, np.uint8,
None),
('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(
algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis0_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_axis1_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'): _view_wrapper(algos.take_2d_multi_bool_bool, np.uint8,
np.uint8),
('bool', 'object'): _view_wrapper(algos.take_2d_multi_bool_object,
np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
indexer = _ensure_int64(indexer)
_take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value,
mask_info=mask_info)
return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
Parameters
----------
arr : ndarray
Input array
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
_maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
# dispatch to internal type takes
if is_categorical(arr):
return arr.take_nd(indexer, fill_value=fill_value,
allow_fill=allow_fill)
elif is_datetimetz(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
elif is_interval_dtype(arr):
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = _ensure_int64(indexer, copy=False)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis,
mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
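# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of take_nd above: a -1 entry in the indexer selects the
# fill_value, and the output dtype is promoted (here int64 -> float64) so the
# default NaN fill can be represented.
def _demo_take_nd():
    import numpy as np
    arr = np.array([10, 20, 30], dtype=np.int64)
    return take_nd(arr, np.array([0, 2, -1]))   # -> array([ 10.,  30.,  nan])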
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None,
allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = _ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = _ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_object(arr, indexer, out, fill_value=fill_value,
mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
# ---- #
# diff #
# ---- #
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
"""
difference of n between self,
analogous to s - s.shift(n)
Parameters
----------
arr : ndarray
n : int
number of periods
axis : int
axis to shift on
Returns
-------
shifted
"""
n = int(n)
na = np.nan
dtype = arr.dtype
is_timedelta = False
if needs_i8_conversion(arr):
dtype = np.float64
arr = arr.view('i8')
na = iNaT
is_timedelta = True
elif is_bool_dtype(dtype):
dtype = np.object_
elif is_integer_dtype(dtype):
dtype = np.float64
dtype = np.dtype(dtype)
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if is_timedelta:
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
if is_timedelta:
from pandas import TimedeltaIndex
out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape(
out_arr.shape).astype('timedelta64[ns]')
return out_arr
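# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of diff above: the first n slots along the axis are filled
# with NaN and the rest hold the difference with the shifted values, matching
# s - s.shift(n) for a Series.
def _demo_diff():
    import numpy as np
    return diff(np.array([1., 3., 6., 10.]), 1)   # -> array([ nan,  2.,  3.,  4.])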
| bsd-3-clause |
Srisai85/scipy | scipy/stats/stats.py | 18 | 169352 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import find_repeats, linregress, theilslopes
from ._rank import rankdata, tiecorrect
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore', 'histogram',
'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'chisqprob', 'betai',
'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be either 'propagate', 'raise', or "
"'ignore'")
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fall back to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
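# --- Added illustrative sketch (not part of the original module) ---
# A minimal sketch of _contains_nan above: it reports whether the input holds
# NaNs and echoes back the (possibly adjusted) nan_policy, raising only when
# nan_policy='raise' and NaNs are actually present.
def _demo_contains_nan():
    import numpy as np
    a = np.array([1.0, np.nan, 3.0])
    flag, policy = _contains_nan(a)            # -> (True, 'propagate')
    raised = False
    try:
        _contains_nan(a, nan_policy='raise')   # NaN present -> ValueError
    except ValueError:
        raised = True
    return flag, policy, raised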
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
"in favour of numpy.nanmean.")
def nanmean(x, axis=0):
"""
Compute the mean over the given axis ignoring nans.
Parameters
----------
x : ndarray
Input array.
axis : int or None, optional
Axis along which the mean is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The mean of `x`, ignoring nans.
See Also
--------
nanstd, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.linspace(0, 4, 3)
>>> a
array([ 0., 2., 4.])
>>> a[-1] = np.nan
>>> stats.nanmean(a)
1.0
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
factor = 1.0 - np.sum(mask, axis) / Norig
x[mask] = 0.0
return np.mean(x, axis) / factor
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
"in favour of numpy.nanstd.\nNote that numpy.nanstd "
"has a different signature.")
def nanstd(x, axis=0, bias=False):
"""
Compute the standard deviation over the given axis, ignoring nans.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the standard deviation is computed. Default is 0.
If None, compute over the whole array `x`.
bias : bool, optional
If True, the biased (normalized by N) definition is used. If False
(default), the unbiased definition is used.
Returns
-------
s : float
The standard deviation.
See Also
--------
nanmean, nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10, dtype=float)
>>> a[1:3] = np.nan
>>> np.std(a)
nan
>>> stats.nanstd(a)
2.9154759474226504
>>> stats.nanstd(a.reshape(2, 5), axis=1)
array([ 2.0817, 1.5811])
>>> stats.nanstd(a.reshape(2, 5), axis=None)
2.9154759474226504
"""
x, axis = _chk_asarray(x, axis)
x = x.copy()
Norig = x.shape[axis]
mask = np.isnan(x)
Nnan = np.sum(mask, axis) * 1.0
n = Norig - Nnan
x[mask] = 0.0
m1 = np.sum(x, axis) / n
if axis:
d = x - np.expand_dims(m1, axis)
else:
d = x - m1
d *= d
m2 = np.sum(d, axis) - m1 * m1 * Nnan
if bias:
m2c = m2 / n
else:
m2c = m2 / (n - 1.0)
return np.sqrt(m2c)
def _nanmedian(arr1d): # This only works on 1d arrays
"""Private function for rank a arrays. Compute the median ignoring Nan.
Parameters
----------
arr1d : ndarray
Input array, of rank 1.
Returns
-------
m : float
The median.
"""
x = arr1d.copy()
c = np.isnan(x)
s = np.where(c)[0]
if s.size == x.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size != 0:
# select non-nans at end of array
enonan = x[-s.size:][~c[-s.size:]]
# fill nans in beginning of array with non-nans of end
x[s[:enonan.size]] = enonan
# slice nans away
x = x[:-s.size]
return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
"in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
"""
Compute the median along the given axis ignoring nan values.
Parameters
----------
x : array_like
Input array.
axis : int or None, optional
Axis along which the median is computed. Default is 0.
If None, compute over the whole array `x`.
Returns
-------
m : float
The median of `x` along `axis`.
See Also
--------
nanstd, nanmean, numpy.nanmedian
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 3, 1, 5, 5, np.nan])
>>> stats.nanmedian(a)
array(3.0)
>>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
>>> stats.nanmedian(b)
array(4.0)
Example with axis:
>>> c = np.arange(30.).reshape(5,6)
>>> idx = np.array([False, False, False, True, False] * 6).reshape(5,6)
>>> c[idx] = np.nan
>>> c
array([[ 0., 1., 2., nan, 4., 5.],
[ 6., 7., nan, 9., 10., 11.],
[ 12., nan, 14., 15., 16., 17.],
[ nan, 19., 20., 21., 22., nan],
[ 24., 25., 26., 27., nan, 29.]])
>>> stats.nanmedian(c, axis=1)
array([ 2. , 9. , 15. , 20.5, 26. ])
"""
x, axis = _chk_asarray(x, axis)
if x.ndim == 0:
return float(x.item())
if hasattr(np, 'nanmedian'): # numpy 1.9 faster for some cases
return np.nanmedian(x, axis)
x = np.apply_along_axis(_nanmedian, axis, x)
if x.ndim == 0:
x = float(x.item())
return x
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Returns the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
see dtype parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations, such as Not a Number (NaN) and infinity,
    because masked arrays automatically mask any non-finite values.
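    Examples
    --------
    A minimal sketch (the geometric mean of 1, 2 and 4 is the cube root of
    their product, 8):
    >>> from scipy import stats
    >>> g = stats.gmean([1, 2, 4])   # ~2.0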
"""
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
"""
Calculates the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
see `dtype` parameter above
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations, such as Not a Number (NaN) and infinity.
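    Examples
    --------
    A small example (3 / (1/1 + 1/4 + 1/4) = 2):
    >>> from scipy import stats
    >>> stats.hmean([1, 4, 4])
    2.0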
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a > 0): # Harmonic mean only defined if greater than zero
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
return size / np.sum(1.0/a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater than zero")
def mode(a, axis=0, nan_policy='propagate'):
"""
Returns an array of the modal (most common) value in the passed array.
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return np.array([]), np.array([])
contains_nan, nan_policy = _contains_nan(a, nan_policy)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
        Axis along which to operate. Default is None. If None, compute over
        the whole array `a`.
Returns
-------
tmean : float
See also
--------
trim_mean : returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float).ravel()
if limits is None:
n = len(a)
return a.var() * n/(n-1.)
am = _mask_to_limits(a, limits, inclusive)
return np.ma.var(am, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
array of values
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmin : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
array of values
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
tmax : float, int or ndarray
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
array of values
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
"""
Calculates the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of points.
It is often used to calculate coefficients of skewness and kurtosis due
to its close relationship with them.
Parameters
----------
a : array_like
data
moment : int or array_like of ints, optional
order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
    .. math:: m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
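    Examples
    --------
    For example, the first central moment is zero by definition and the
    second is the biased sample variance:
    >>> from scipy import stats
    >>> stats.moment([1, 2, 3, 4, 5], moment=1)
    0.0
    >>> stats.moment([1, 2, 3, 4, 5], moment=2)
    2.0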
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n-1)/2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
"""
Computes the coefficient of variation, the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
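    Examples
    --------
    A minimal sketch (for 1..5 the biased standard deviation is sqrt(2) and
    the mean is 3):
    >>> from scipy import stats
    >>> cv = stats.variation([1, 2, 3, 4, 5])   # sqrt(2)/3, about 0.471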
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
if contains_nan and nan_policy == 'propagate':
return np.nan
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
"""
Computes the skewness of a data set.
For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the right tail of the
distribution. The function `skewtest` can be used to determine if the
skewness value is close enough to 0, statistically speaking.
Parameters
----------
a : ndarray
data
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
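    Examples
    --------
    A symmetric sample has zero skewness; a sample with a longer right tail
    gives a positive value (shown here only as a sign check):
    >>> from scipy import stats
    >>> stats.skew([1, 2, 3, 4, 5])
    0.0
    >>> s = stats.skew([2, 8, 0, 4, 1, 9, 9, 0])   # positive for this sample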
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Computes the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
    If bias is False, then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
data for which the kurtosis is calculated
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
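    Examples
    --------
    For ``[1, 2, 3, 4, 5]`` the fourth central moment is 6.8 and the
    variance is 2, so the Pearson kurtosis is 1.7:
    >>> from scipy import stats
    >>> stats.kurtosis([1, 2, 3, 4, 5], fisher=False)
    1.7
    >>> stats.kurtosis([1, 2, 3, 4, 5])   # Fisher definition subtracts 3
    -1.3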
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
if fisher:
return vals - 3
else:
return vals
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Computes several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
nobs : int
Number of observations (length of data along `axis`).
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([ 2., 3.]), variance=array([ 2., 2.]),
skewness=array([ 0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
# Return namedtuple for clarity
DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
'variance', 'skewness',
'kurtosis'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if contains_nan and nan_policy == 'propagate':
res = np.zeros(6) * np.nan
return DescribeResult(*res)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Tests whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
a 2-sided p-value for the hypothesis test
Notes
-----
The sample size must be at least 8.
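    Examples
    --------
    A minimal usage sketch (the test needs at least 8 observations; exact
    values are omitted here):
    >>> from scipy import stats
    >>> stat, p = stats.skewtest([2, 8, 0, 4, 1, 9, 9, 0])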
"""
a, axis = _chk_asarray(a, axis)
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if contains_nan and nan_policy == 'propagate':
return SkewtestResult(np.nan, np.nan)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = float(a.shape[axis])
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a dataset has normal kurtosis
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
array of the sample data
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The 2-sided p-value for the hypothesis test
Notes
-----
    Valid only for n>=20. The Z-score is set to 0 for bad entries.
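    Examples
    --------
    A minimal usage sketch (20 or more observations avoid the small-sample
    warning; exact values are omitted here):
    >>> from scipy import stats
    >>> stat, p = stats.kurtosistest(np.arange(20.))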
"""
a, axis = _chk_asarray(a, axis)
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
if contains_nan and nan_policy == 'propagate':
return KurtosistestResult(np.nan, np.nan)
n = float(a.shape[axis])
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
x = (b2-E) / np.sqrt(varb2)
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
denom = np.where(denom < 0, 99, denom)
term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
Z = (term1 - term2) / np.sqrt(2/(9.0*A))
Z = np.where(denom == 99, 0, Z)
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Tests whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the data to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size," Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
departures from normality," Biometrika, 60, 613-622
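    Examples
    --------
    A minimal usage sketch (the statistic and p-value depend on the random
    sample, so no output is shown):
    >>> from scipy import stats
    >>> x = np.random.normal(0, 1, 1000)
    >>> stat, p = stats.normaltest(x)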
"""
a, axis = _chk_asarray(a, axis)
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
if contains_nan and nan_policy == 'propagate':
return NormaltestResult(np.nan, np.nan)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           Economics Letters 6, 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = float(x.size)
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
"""
Returns a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
    the value of `interpolation_method`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
- fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
- lower: ``i``.
- higher: ``j``.
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
    For NumPy 1.9 and higher, `numpy.percentile` provides all the
    functionality that `scoreatpercentile` provides, and it is significantly
    faster. Users with numpy >= 1.9 are therefore recommended to use
    `numpy.percentile`.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
The percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
This optional parameter specifies the interpretation of the
resulting score:
- "rank": Average percentage ranking of score. In case of
multiple matches, average the percentage rankings of
all matching scores.
- "weak": This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80%
means that 80% of values are less than or equal
to the provided score.
- "strict": Similar to "weak", except that only values that are
strictly less than the given score are counted.
- "mean": The average of the "weak" and "strict" scores, often used in
testing. See
http://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
a = np.array(a)
n = len(a)
if kind == 'rank':
if not np.any(a == score):
a = np.append(a, score)
a_len = np.array(list(range(len(a))))
else:
a_len = np.array(list(range(len(a)))) + 1.0
a = np.sort(a)
idx = [a == score]
pct = (np.mean(a_len[idx]) / n) * 100.0
return pct
elif kind == 'strict':
return np.sum(a < score) / float(n) * 100
elif kind == 'weak':
return np.sum(a <= score) / float(n) * 100
elif kind == 'mean':
return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
"use np.histogram2d instead"))
def histogram2(a, bins):
"""
Compute histogram using divisions in bins.
Count the number of times values from array `a` fall into
numerical ranges defined by `bins`. Range x is given by
    bins[x] <= range_x < bins[x+1] where x = 0, ..., N and N is the
length of the `bins` array. The last range is given by
bins[N] <= range_N < infinity. Values less than bins[0] are
not included in the histogram.
Parameters
----------
a : array_like of rank 1
The array of values to be assigned into bins
bins : array_like of rank 1
Defines the ranges of values to use during histogramming.
Returns
-------
histogram2 : ndarray of rank 1
Each value represents the occurrences for a given bin (range) of
values.
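    Examples
    --------
    For example, with ``bins=[1, 2, 3]`` the counts for the ranges
    [1, 2), [2, 3) and [3, inf) are:
    >>> from scipy import stats
    >>> stats.histogram2([1, 2, 2, 3, 3, 3], bins=[1, 2, 3])
    array([1, 2, 3])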
"""
# comment: probably obsoleted by numpy.histogram()
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
return n[1:] - n[:-1]
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Separates the range into several bins and returns the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
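    Examples
    --------
    A small illustration (the default limits extend slightly past the data
    range, as described above):
    >>> from scipy import stats
    >>> res = stats.histogram([1, 4, 2, 1, 3, 1], numbins=4)
    >>> res.count
    array([ 3.,  1.,  1.,  1.])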
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit',
'binsize', 'extrapoints'))
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit',
'binsize', 'extrapoints'))
return CumfreqResult(cumhist, l, b, e)
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Returns a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = histogram(a, numbins, defaultreallimits, weights=weights)
h = h / float(a.shape[0])
RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit',
'binsize', 'extrapoints'))
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Computes the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
# If the arrays are not all the same shape, calling np.array(arrays)
# creates a 1-D array with dtype `object` in numpy 1.6+. In numpy
# 1.5.x, it raises an exception. To work around this, we explicitly
# set the dtype to `object` when the arrays are not all the same shape.
if len(arrays) < 2 or all(x.shape == arrays[0].shape for x in arrays[1:]):
dt = None
else:
dt = object
return np.array(arrays, dtype=dt)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
"""
The signal-to-noise ratio of the input data.
Returns the signal-to-noise ratio of `a`, here defined as the mean
divided by the standard deviation.
Parameters
----------
a : array_like
An array_like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction for standard deviation. Default is 0.
Returns
-------
s2n : ndarray
The mean to standard deviation ratio(s) along `axis`, or 0 where the
standard deviation is 0.
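    Examples
    --------
    A minimal sketch (for this sample the ratio is mean/std = 3/sqrt(2),
    roughly 2.12):
    >>> from scipy import stats
    >>> snr = stats.signaltonoise([1, 2, 3, 4, 5])   # ~2.12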
"""
a = np.asanyarray(a)
m = a.mean(axis)
sd = a.std(axis=axis, ddof=ddof)
return np.where(sd == 0, 0, m/sd)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Calculates the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different to the default (0) used by other
    ddof containing routines, such as np.std and stats.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
if contains_nan and nan_policy == 'propagate':
return np.nan
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0):
"""
Calculates the z score of each value in the sample, relative to the sample
mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of input
array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091, 0.1954,
... 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom (``ddof=1``)
to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
mns = a.mean(axis=axis)
sstd = a.std(axis=axis, ddof=ddof)
if axis and mns.ndim < a.ndim:
return ((a - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculates the relative z-scores.
Returns an array of z-scores, i.e., scores that are standardized to zero
mean and unit variance, where mean and variance are calculated from the
comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of `asarray`
for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis)
sstd = compare.std(axis=axis, ddof=ddof)
if axis and mns.ndim < compare.ndim:
return ((scores - np.expand_dims(mns, axis=axis)) /
np.expand_dims(sstd, axis=axis))
else:
return (scores - mns) / sstd
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
"""
Clip array to a given value.
Similar to numpy.clip(), except that values less than `threshmin` or
greater than `threshmax` are replaced by `newval`, instead of by
`threshmin` and `threshmax` respectively.
Parameters
----------
a : array_like
Data to threshold.
threshmin : float, int or None, optional
Minimum threshold, defaults to None.
threshmax : float, int or None, optional
Maximum threshold, defaults to None.
newval : float or int, optional
Value to put in place of values in `a` outside of bounds.
Defaults to 0.
Returns
-------
out : ndarray
The clipped input array, with values less than `threshmin` or
greater than `threshmax` replaced with `newval`.
Examples
--------
>>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
>>> from scipy import stats
>>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
array([-1, -1, 6, 3, -1, 6, -1, -1, -1, 8])
"""
a = asarray(a).copy()
mask = zeros(a.shape, dtype=bool)
if threshmin is not None:
mask |= (a < threshmin)
if threshmax is not None:
mask |= (a > threshmax)
a[mask] = newval
return a
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `c`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower',
'upper'))
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slices off a proportion of items from both ends of an array.
Slices off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slices off less if proportion results in a non-integer slice index (i.e.,
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exists in numpy 1.8.0 and higher;
# on older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[sl]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slices off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slices off less if proportion results in a non-integer slice index
(i.e., conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
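Examples
--------
A minimal usage sketch; the sample data below are illustrative only:
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.1, tail='right')
>>> b.shape
(18,)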
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
# np.partition is preferred but it only exists in numpy 1.8.0 and higher;
# on older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array
proportiontocut : float
Fraction to cut off of both tails of the distribution
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
# np.partition is preferred but it only exists in numpy 1.8.0 and higher;
# on older versions we fall back to np.sort
try:
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
except AttributeError:
atmp = np.sort(a, axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[sl], axis=axis)
def f_oneway(*args):
"""
Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
.. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.1210194716424473, pvalue=0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / float(bign))
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / float(len(a))
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= (_square_of_sums(alldata) / float(bign))
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
return F_onewayResult(f, prob)
def pearsonr(x, y):
"""
Calculates a Pearson correlation coefficient and the p-value for testing
non-correlation.
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear
relationship. Positive correlations imply that as x increases, so does
y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
x : (N,) array_like
Input
y : (N,) array_like
Input
Returns
-------
r : float
Pearson's correlation coefficient
p-value : float
2-tailed p-value
References
----------
http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
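Examples
--------
A minimal usage sketch; the data are illustrative and the quoted value is
approximate:
>>> from scipy import stats
>>> x = [1, 2, 3, 4, 5]
>>> y = [2, 1, 4, 3, 7]
>>> r, p = stats.pearsonr(x, y)   # r is roughly 0.82 for these data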
"""
# x and y should have same length.
x = np.asarray(x)
y = np.asarray(y)
n = len(x)
mx = x.mean()
my = y.mean()
xm, ym = x - mx, y - my
r_num = np.add.reduce(xm * ym)
r_den = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
r = r_num / r_den
# Presumably, if abs(r) > 1, then it is only some small artifact of floating
# point arithmetic.
r = max(min(r, 1.0), -1.0)
df = n - 2
if abs(r) == 1.0:
prob = 0.0
else:
t_squared = r**2 * (df / ((1.0 - r) * (1.0 + r)))
prob = _betai(0.5*df, 0.5, df/(df+t_squared))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Performs a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Which alternative hypothesis to the null hypothesis the test uses.
Default is 'two-sided'.
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1,0] > 0 and c[0,1] > 0:
oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
else:
oddsratio = np.inf
n1 = c[0,0] + c[0,1]
n2 = c[1,0] + c[1,1]
n = c[0,0] + c[1,0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin lower/upper halves in two-sided
test.
"""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
elif alternative == 'two-sided':
mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0,0] < mode:
plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
if pvalue > 1.0:
pvalue = 1.0
return oddsratio, pvalue
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculates a Spearman rank-order correlation coefficient and the p-value
to test for non-correlation.
The Spearman correlation is a nonparametric measure of the monotonicity
of the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Like other correlation coefficients, this one varies
between -1 and +1 with 0 implying no correlation. Correlations of -1 or
+1 imply an exact monotonic relationship. Positive correlations imply that
as x increases, so does y. Negative correlations imply that as x
increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). The correlation matrix is square with
length equal to total number of variables (columns or rows) in a and b
combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated, has same dimension as rho.
Notes
-----
Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
if a.size <= 1:
return SpearmanrResult(np.nan, np.nan)
ar = np.apply_along_axis(rankdata, axisout, a)
br = None
if b is not None:
b, axisout = _chk_asarray(b, axis)
contains_nan, nan_policy = _contains_nan(b, nan_policy)
if contains_nan and nan_policy == 'omit':
b = ma.masked_invalid(b)
return mstats_basic.spearmanr(a, b, axis)
if contains_nan and nan_policy == 'propagate':
return SpearmanrResult(np.nan, np.nan)
br = np.apply_along_axis(rankdata, axisout, b)
n = a.shape[axisout]
rs = np.corrcoef(ar, br, rowvar=axisout)
olderr = np.seterr(divide='ignore') # rs can have elements equal to 1
try:
t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
finally:
np.seterr(**olderr)
prob = 2 * distributions.t.sf(np.abs(t), n-2)
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
return SpearmanrResult(rs, prob)
def pointbiserialr(x, y):
"""
Calculates a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are the means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are the numbers of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
no. 3, pp. 603-607, 1954.
.. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
'pvalue'))
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
"""
Calculates Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the tau-b version of Kendall's tau which
accounts for ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Whether to use lexsort or quicksort as the sorting method for the
initial sort of the inputs. Default is lexsort (True), for which
`kendalltau` is of complexity O(n log(n)). If False, the complexity is
O(n^2), but with a smaller pre-factor (so quicksort may be faster for
small arrays).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
The definition of Kendall's tau that is used is::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association, Vol. 61,
No. 314, Part 1, pp. 436-439, 1966.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.24821309157521476
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
contains_nan, nan_policy = (_contains_nan(x, nan_policy) or
_contains_nan(y, nan_policy))
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y)
n = np.int64(len(x))
temp = list(range(n)) # support structure used by mergesort
# this closure recursively sorts sections of perm[] by comparing
# elements of y[perm[]] using temp[] as support
# returns the number of swaps required by an equivalent bubble sort
def mergesort(offs, length):
exchcnt = 0
if length == 1:
return 0
if length == 2:
if y[perm[offs]] <= y[perm[offs+1]]:
return 0
t = perm[offs]
perm[offs] = perm[offs+1]
perm[offs+1] = t
return 1
length0 = length // 2
length1 = length - length0
middle = offs + length0
exchcnt += mergesort(offs, length0)
exchcnt += mergesort(middle, length1)
if y[perm[middle - 1]] < y[perm[middle]]:
return exchcnt
# merging
i = j = k = 0
while j < length0 or k < length1:
if k >= length1 or (j < length0 and y[perm[offs + j]] <=
y[perm[middle + k]]):
temp[i] = perm[offs + j]
d = i - j
j += 1
else:
temp[i] = perm[middle + k]
d = (offs + i) - (middle + k)
k += 1
if d > 0:
exchcnt += d
i += 1
perm[offs:offs+length] = temp[0:length]
return exchcnt
# initial sort on values of x and, if tied, on values of y
if initial_lexsort:
# sort implemented as mergesort, worst case: O(n log(n))
perm = np.lexsort((y, x))
else:
# sort implemented as quicksort, 30% faster but with worst case: O(n^2)
perm = list(range(n))
perm.sort(key=lambda a: (x[a], y[a]))
# compute joint ties
first = 0
t = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
t += ((i - first) * (i - first - 1)) // 2
first = i
t += ((n - first) * (n - first - 1)) // 2
# compute ties in x
first = 0
u = 0
for i in xrange(1, n):
if x[perm[first]] != x[perm[i]]:
u += ((i - first) * (i - first - 1)) // 2
first = i
u += ((n - first) * (n - first - 1)) // 2
# count exchanges
exchanges = mergesort(0, n)
# compute ties in y after mergesort with counting
first = 0
v = 0
for i in xrange(1, n):
if y[perm[first]] != y[perm[i]]:
v += ((i - first) * (i - first - 1)) // 2
first = i
v += ((n - first) * (n - first - 1)) // 2
tot = (n * (n - 1)) // 2
if tot == u or tot == v:
# Special case for all ties in both ranks
return KendalltauResult(np.nan, np.nan)
# Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom
# what follows reproduces the ending of Gary Strangman's original
# stats.kendalltau() in SciPy
svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
z = tau / np.sqrt(svar)
prob = special.erfc(np.abs(z) / 1.4142136)
return KendalltauResult(tau, prob)
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculates the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
sample observation
popmean : float or array_like
expected value in null hypothesis; if array_like then it must have the
same shape as `a` excluding the axis dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test whether the mean of the random sample is equal to the true mean, and
to a different mean. We reject the null hypothesis in the second case and
do not reject it in the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / float(n))
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
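"""Shared core of the independent-sample t-tests: form the t statistic
from the difference in means and the standard-error denominator, then
compute the two-sided p-value via `_ttest_finish`."""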
d = mean1 - mean2
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
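# Welch's t-test denominator; the degrees of freedom below follow the
# Welch-Satterthwaite approximation.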
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
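# Standard (equal-variance) t-test denominator: svar is the pooled sample
# variance with n1 + n2 - 2 degrees of freedom.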
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviation(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics
pvalue : float or array
The two-tailed p-value.
See also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
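Examples
--------
A minimal usage sketch with made-up summary statistics; it should match
`ttest_ind` applied to raw samples having these means, standard
deviations and sizes:
>>> from scipy import stats
>>> t, p = stats.ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5),
...                                    nobs1=13, mean2=12.0,
...                                    std2=np.sqrt(39.0), nobs2=11)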
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
# check both a and b
contains_nan, nan_policy = (_contains_nan(a, nan_policy) or
_contains_nan(b, nan_policy))
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculates the T-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float or array
t-statistic
pvalue : float or array
two-tailed p-value
Notes
-----
Examples for the use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1, then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
http://en.wikipedia.org/wiki/T-test#Dependent_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
# check both a and b
contains_nan, nan_policy = (_contains_nan(a, nan_policy) or
_contains_nan(b, nan_policy))
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_rel(a, b, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / float(n))
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution G(x) of an observed
random variable against a given distribution F(x). Under the null
hypothesis the two distributions are identical, G(x)=F(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less','greater'}, optional
Defines the alternative hypothesis (see explanation above).
Default is 'two-sided'.
mode : 'approx' (default) or 'asymp', optional
Defines the distribution used for calculating the p-value.
- 'approx' : use approximation to exact distribution of test statistic
- 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function F(x) of the
hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
if alternative in ['two-sided', 'greater']:
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D,
distributions.kstwobign.sf(D * np.sqrt(N)))
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
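# For example, passing lambda_="log-likelihood" to power_divergence() maps
# to lambda_ = 0, i.e. the G-test statistic.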
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
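Examples
--------
Illustrative only (this is a private helper):
>>> n_per_row = _count(np.arange(6).reshape(2, 3), axis=1)   # 3 per row
>>> n_unmasked = _count(np.ma.array([1, 2, 3], mask=[0, 1, 0]))  # 2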
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
`lambda_` gives the power in the Cressie-Read power divergence
statistic. The default is 1. For convenience, `lambda_` may be
assigned one of the following strings, in which case the
corresponding numerical value is used::
String Value Description
"pearson" 1 Pearson's chi-squared statistic.
In this case, the function is
equivalent to `stats.chisquare`.
"log-likelihood" 0 Log-likelihood ratio. Also known as
the G-test [3]_.
"freeman-tukey" -1/2 Freeman-Tukey statistic.
"mod-log-likelihood" -1 Modified log-likelihood ratio.
"neyman" -2 Neyman's statistic.
"cressie-read" 2/3 The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", http://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.atleast_1d(np.asanyarray(f_exp))
else:
# Compute the equivalent of
# f_exp = f_obs.mean(axis=axis, keepdims=True)
# Older versions of numpy do not have the 'keepdims' argument, so
# we have to do a little work to achieve the same result.
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = np.atleast_1d(f_obs.mean(axis=axis))
if axis is not None:
reduced_shape = list(f_obs.shape)
reduced_shape[axis] = 1
f_exp.shape = reduced_shape
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic',
'pvalue'))
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculates a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
power_divergence
mstats.chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
def ks_2samp(data1, data2):
"""
Computes the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different
Returns
-------
statistic : float
KS statistic
pvalue : float
two-tailed p-value
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample K-S test, the distribution is
assumed to be continuous.
This is the two-sided test, one-sided tests are not implemented.
The test uses the two-sided asymptotic Kolmogorov-Smirnov distribution.
If the K-S statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333337, 4.6674975515806989e-005)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14498781825751686)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
data_all = np.concatenate([data1, data2])
cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
d = np.max(np.absolute(cdf1 - cdf2))
# Note: d absolute not signed distance
en = np.sqrt(n1 * n2 / float(n1 + n2))
try:
prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
prob = 1.0
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
return Ks_2sampResult(d, prob)
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
"""
Computes the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
Returns
-------
statistic : float
        The Mann-Whitney U statistic.
pvalue : float
        One-sided p-value assuming an asymptotic normal distribution.
Notes
-----
    Use only when the number of observations in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.
    This test corrects for ties and by default uses a continuity correction.
    The reported p-value is for a one-sided hypothesis; to get the two-sided
    p-value, multiply the returned p-value by 2.
"""
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
fact2 = 1
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative == 'less':
z = u1 - meanrank
elif alternative == 'greater':
z = u2 - meanrank
elif alternative == 'two-sided':
bigu = max(u1, u2)
z = np.abs(bigu - meanrank)
fact2 = 2.
else:
raise ValueError("alternative should be 'less', 'greater'"
"or 'two-sided'")
z = z / sd
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
'pvalue'))
return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
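# A minimal usage sketch (illustrative only, not part of the original module):
# the helper name and the synthetic samples are assumptions, sized to respect
# the "> 20 observations per sample" guideline from the Notes above.
def _example_mannwhitneyu_usage():
    rng = np.random.RandomState(0)
    group_a = rng.normal(loc=0.0, scale=1.0, size=25)
    group_b = rng.normal(loc=0.8, scale=1.0, size=30)
    # Returns the U statistic and a p-value (see the Notes above for how the
    # p-value should be interpreted).
    result = mannwhitneyu(group_a, group_b)
    return result.statistic, result.pvalue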
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed
pvalue : float
The two-sided p-value of the test
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
return RanksumsResult(z, prob)
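# A minimal usage sketch (illustrative only, not part of the original module):
# the helper name and the synthetic continuous samples are assumptions.
def _example_ranksums_usage():
    rng = np.random.RandomState(0)
    sample1 = rng.normal(loc=0.0, size=30)
    sample2 = rng.normal(loc=1.0, size=40)
    # z is the normal-approximation test statistic, prob the two-sided p-value.
    z, prob = ranksums(sample1, sample2)
    return z, prob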
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post-hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution
See Also
--------
f_oneway : 1-way ANOVA
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
for a in args:
a = ma.masked_invalid(a)
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])
totaln = np.sum(n)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
def friedmanchisquare(*args):
"""
Computes the Friedman test for repeated measurements
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
the test statistic, correcting for ties
pvalue : float
the associated p-value assuming that the test statistic has a chi
squared distribution
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] http://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / float(k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
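# A minimal usage sketch (illustrative only, not part of the original module):
# the helper name and the three illustrative measurement series (the same
# seven individuals measured three times) are assumptions.
def _example_friedmanchisquare_usage():
    series_a = [72, 96, 88, 92, 74, 76, 82]
    series_b = [120, 120, 132, 120, 101, 96, 112]
    series_c = [76, 95, 104, 96, 84, 72, 76]
    stat, p = friedmanchisquare(series_a, series_b, series_c)
    return stat, p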
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Methods for combining the p-values of independent tests bearing upon the
same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'stouffer'}, optional
Name of method to use to combine p-values. The following methods are
available:
- "fisher": Fisher's method (Fisher's combined probability test),
the default.
- "stouffer": Stouffer's Z-score method.
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method:
- "fisher": The chi-squared statistic
- "stouffer": The Z-score
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [3]_ [4]_.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
Xsq = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
return (Xsq, pval)
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
Z = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(Z)
return (Z, pval)
else:
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
"use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
"""
Probability value (1-tail) for the Chi^2 probability distribution.
Broadcasting rules apply.
Parameters
----------
chisq : array_like or float > 0
df : array_like or float, probably int >= 1
Returns
-------
chisqprob : ndarray
The area from `chisq` to infinity under the Chi^2 probability
distribution with degrees of freedom `df`.
"""
return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
"use special.betainc instead")
def betai(a, b, x):
"""
Returns the incomplete beta function.
I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)
where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
function of a.
The standard broadcasting rules apply to a, b, and x.
Parameters
----------
a : array_like or float > 0
b : array_like or float > 0
x : array_like or float
x will be clipped to be no greater than 1.0 .
Returns
-------
betai : ndarray
Incomplete beta function.
"""
return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
"""Calculation of Wilks lambda F-statistic for multivarite data, per
Maxwell & Delaney p.657.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
lmbda = linalg.det(EF) / linalg.det(ER)
if (a-1)**2 + (b-1)**2 == 5:
q = 1
else:
q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
"""
Returns an F-statistic for a restricted vs. unrestricted model.
Parameters
----------
ER : float
`ER` is the sum of squared residuals for the restricted model
or null hypothesis
EF : float
`EF` is the sum of squared residuals for the unrestricted model
or alternate hypothesis
dfR : int
`dfR` is the degrees of freedom in the restricted model
dfF : int
`dfF` is the degrees of freedom in the unrestricted model
Returns
-------
F-statistic : float
"""
return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
"""
Returns a multivariate F-statistic.
Parameters
----------
ER : ndarray
Error associated with the null hypothesis (the Restricted model).
From a multivariate F calculation.
EF : ndarray
Error associated with the alternate hypothesis (the Full model)
From a multivariate F calculation.
dfnum : int
        Degrees of freedom of the Restricted model.
dfden : int
Degrees of freedom associated with the Restricted model.
Returns
-------
fstat : float
The computed F-statistic.
"""
if isinstance(ER, (int, float)):
ER = array([[ER]])
if isinstance(EF, (int, float)):
EF = array([[EF]])
n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
d_en = linalg.det(EF) / float(dfden)
return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
"""
Squares each element of the input array, and returns the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
"in scipy 0.17.0")
def square_of_sums(a, axis=0):
return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
"""
Sums elements of the input array, and returns the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
"""
Sort an array and provide the argsort.
Parameters
----------
a : array_like
Input array.
Returns
-------
fastsort : ndarray of type int
sorted indices into the original array
"""
# TODO: the wording in the docstring is nonsense.
it = np.argsort(a)
as_ = a[it]
return as_, it
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/svm/tests/test_sparse.py | 95 | 12156 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits because iris, blobs or make_classification
    # didn't show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
    # This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
quantumlib/Cirq | cirq-core/cirq/contrib/routing/device.py | 1 | 3170 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Iterable, Tuple, Dict, Any
import networkx as nx
import cirq
from cirq._compat import deprecated
@deprecated(deadline="v0.12", fix="use gridqubits_to_graph_device(device.qubits) instead")
def xmon_device_to_graph(device: Any) -> nx.Graph:
"""Gets the graph of an XmonDevice."""
return gridqubits_to_graph_device(device.qubits)
def get_linear_device_graph(n_qubits: int) -> nx.Graph:
"""Gets the graph of a linearly connected device."""
qubits = cirq.LineQubit.range(n_qubits)
edges = [tuple(qubits[i : i + 2]) for i in range(n_qubits - 1)]
return nx.Graph(edges)
def get_grid_device_graph(*args, **kwargs) -> nx.Graph:
"""Gets the graph of a grid of qubits.
See GridQubit.rect for argument details."""
return gridqubits_to_graph_device(cirq.GridQubit.rect(*args, **kwargs))
def gridqubits_to_graph_device(qubits: Iterable[cirq.GridQubit]):
"""Gets the graph of a set of grid qubits."""
return nx.Graph(
pair for pair in itertools.combinations(qubits, 2) if _manhattan_distance(*pair) == 1
)
def _manhattan_distance(qubit1: cirq.GridQubit, qubit2: cirq.GridQubit) -> int:
return abs(qubit1.row - qubit2.row) + abs(qubit1.col - qubit2.col)
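# A minimal usage sketch (illustrative only, not part of the original module):
# the helper name and the 3x3 grid size are assumptions.
def _example_grid_device_graph():
    # Nearest-neighbour graph of a hypothetical 3x3 grid device:
    # 9 qubits connected by 12 horizontal/vertical edges.
    graph = get_grid_device_graph(3, 3)
    # Row/column positions keyed by qubit, suitable for nx.draw_networkx.
    positions = nx_qubit_layout(graph)
    return graph, positions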
def nx_qubit_layout(graph: nx.Graph) -> Dict[cirq.Qid, Tuple[float, float]]:
"""Return a layout for a graph for nodes which are qubits.
This can be used in place of nx.spring_layout or other networkx layouts.
GridQubits are positioned according to their row/col. LineQubits are
positioned in a line.
>>> import cirq.contrib.routing as ccr
>>> import networkx as nx
>>> import matplotlib.pyplot as plt
>>> # Clear plot state to prevent issues with pyplot dimensionality.
>>> plt.clf()
>>> g = ccr.gridqubits_to_graph_device(cirq.GridQubit.rect(4,5))
>>> pos = ccr.nx_qubit_layout(g)
>>> nx.draw_networkx(g, pos=pos)
"""
pos: Dict[cirq.Qid, Tuple[float, float]] = {}
_node_to_i_cache = None
for node in graph.nodes:
if isinstance(node, cirq.GridQubit):
pos[node] = (node.col, -node.row)
elif isinstance(node, cirq.LineQubit):
# Offset to avoid overlap with gridqubits
pos[node] = (node.x, 0.5)
else:
if _node_to_i_cache is None:
_node_to_i_cache = {n: i for i, n in enumerate(sorted(graph.nodes))}
# Position in a line according to sort order
# Offset to avoid overlap with gridqubits
pos[node] = (0.5, _node_to_i_cache[node] + 1)
return pos
| apache-2.0 |
shikhardb/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 14 | 15763 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
ProkopHapala/SimpleSimulationEngine | python/pySimE/space/exp/pykep/lambert_Prokop.py | 1 | 1376 |
import numpy as np
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from PyKEP import epoch, DAY2SEC, planet_ss, AU, MU_SUN, lambert_problem
from PyKEP.orbit_plots import plot_planet, plot_lambert
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
t1 = epoch(0)
t2 = epoch(740)
dt = (t2.mjd2000 - t1.mjd2000) * DAY2SEC
ax.scatter(0,0,0, color='y')
pl = planet_ss('earth')
plot_planet(ax,pl, t0=t1, color=(0.8,0.8,1), legend=True, units = AU)
rE,vE = pl.eph(t1)
pl = planet_ss('mars')
plot_planet(ax,pl, t0=t2, color=(0.8,0.8,1), legend=True, units = AU)
rM, vM = pl.eph(t2)
l = lambert_problem(rE,rM,dt,MU_SUN)
nmax = l.get_Nmax()
print "max number of revolutions",nmax
plot_lambert(ax,l , color=(1,0,0), legend=True, units = AU)
for i in range(1,nmax*2+1):
    print(i)
plot_lambert(ax,l,sol=i, color=(1,0,i/float(nmax*2)), legend=True, units = AU)
def axisEqual3D(ax):
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/2
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
axisEqual3D(ax)
plt.show()
| mit |
bertyhell/moviepy | moviepy/video/tools/segmenting.py | 16 | 1826 | import numpy as np
import scipy.ndimage as ndi
from moviepy.video.VideoClip import ImageClip
def findObjects(clip,rem_thr=500, preview=False):
"""
    Returns a list of ImageClips, each representing a separate object on
    the screen.
    rem_thr : all objects found with size < rem_thr will be
considered false positives and will be removed
"""
image = clip.get_frame(0)
if clip.mask is None:
clip = clip.add_mask()
mask = clip.mask.get_frame(0)
labelled, num_features = ndi.measurements.label(image[:,:,0])
#find the objects
slices = ndi.find_objects(labelled)
# cool trick to remove letter holes (in o,e,a, etc.)
slices = [e for e in slices if mask[e[0],e[1]].mean() >0.2]
# remove very small slices
slices = [e for e in slices if image[e[0],e[1]].size > rem_thr]
# Sort the slices from left to right
islices = sorted(enumerate(slices), key = lambda s : s[1][1].start)
letters = []
for i,(ind,(sy,sx)) in enumerate(islices):
""" crop each letter separately """
sy = slice(sy.start-1,sy.stop+1)
sx = slice(sx.start-1,sx.stop+1)
letter = image[sy,sx]
labletter = labelled[sy,sx]
maskletter = (labletter==(ind+1))*mask[sy,sx]
letter = ImageClip(image[sy,sx])
letter.mask = ImageClip( maskletter,ismask=True)
letter.screenpos = np.array((sx.start,sy.start))
letters.append(letter)
if preview:
import matplotlib.pyplot as plt
print( "found %d objects"%(num_features) )
fig,ax = plt.subplots(2)
ax[0].axis('off')
ax[0].imshow(labelled)
ax[1].imshow([range(num_features)],interpolation='nearest')
ax[1].set_yticks([])
plt.show()
return letters
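# A minimal usage sketch (illustrative only, not part of the original module):
# the helper name, the synthetic frame and the explicit mask construction are
# assumptions standing in for a rendered title clip.
def _example_find_objects():
    # Black 100x200 RGB frame containing two white 30x30 squares, both kept
    # away from the borders so the one-pixel slice padding above stays valid.
    frame = np.zeros((100, 200, 3), dtype='uint8')
    frame[20:50, 20:50] = 255
    frame[40:70, 120:150] = 255
    clip = ImageClip(frame)
    clip.mask = ImageClip((frame[:, :, 0] > 0).astype(float), ismask=True)
    letters = findObjects(clip, rem_thr=500)
    # Each returned ImageClip carries its top-left position in `screenpos`.
    return [letter.screenpos for letter in letters]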
| mit |
joostvanzwieten/nutils | docs/sphinx_mods.py | 1 | 14556 | # Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import inspect, pathlib, shutil, os, runpy, urllib.parse, shlex, doctest, re, io, hashlib, base64, treelog, stringly
import docutils.nodes, docutils.parsers.rst, docutils.statemachine
import sphinx.util.logging, sphinx.util.docutils, sphinx.addnodes
import nutils.matrix, nutils.testing
import numpy
project_root = pathlib.Path(__file__).parent.parent.resolve()
def process_signature(self, objtype, fullname, object, options, args, retann):
if objtype in ('function', 'class', 'method'):
try:
signature = inspect.signature(object)
except ValueError:
# Some builtins have no signature.
return
else:
return
# Drop annotations from signature.
signature = signature.replace(parameters=(param.replace(annotation=param.empty) for param in signature.parameters.values()),
return_annotation=inspect.Signature.empty)
# Return a string representation of args and of the return annotation. Note
# that `str(signature)` would have included the return annotation if we
# hadn't removed it above.
return str(signature).replace('\\', '\\\\'), ''
def print_rst_autogen_header(*, file, src=None):
print('..', file=file)
print(' Automatically generated. Edits are futile.', file=file)
print(file=file)
print(':autogenerated:', file=file)
if src is not None:
abssrc = src.resolve().relative_to(project_root)
print(':autogeneratedfrom: {}'.format(abssrc), file=file)
print(file=file)
def print_rst_h1(text, *, file):
assert '\n' not in text
print(file=file)
print(text, file=file)
print('='*len(text), file=file)
print(file=file)
def print_rst_label(name, *, file):
print(file=file)
print('.. _{}:'.format(name), file=file)
print(file=file)
def copy_utime(src, dst):
stat = os.stat(str(src))
os.utime(str(dst), ns=(stat.st_atime_ns, stat.st_mtime_ns))
def generate_examples(app):
dst_examples = pathlib.Path(app.srcdir)/'examples'
dst_examples.mkdir(parents=True, exist_ok=True)
srcs = tuple(f for f in sorted(project_root.glob('examples/*.py')) if f.name != '__init__.py')
for src in sphinx.util.status_iterator(srcs, 'generating examples... ', 'purple', len(srcs), app.verbosity):
name = src.name
dst = dst_examples/(src.with_suffix('.rst').name)
with dst.open('w', encoding='utf-8') as f_dst:
print_rst_autogen_header(file=f_dst, src=src)
# Add a label such that you can reference an example by
# :ref:`examples/laplace.py`.
print_rst_label('examples/{}'.format(name), file=f_dst)
print_rst_h1(name, file=f_dst)
print('.. exampledoc:: {}'.format(src.relative_to(project_root).as_posix()), file=f_dst)
copy_utime(src, dst)
class LineIter:
def __init__(self, lines):
self._lines = iter(lines)
self._index = -1
self._next = None
self.__next__()
def __bool__(self):
return self._next != StopIteration
def __iter__(self):
return self
def __next__(self):
if self._next == StopIteration:
raise StopIteration
value = self._index, self._next
try:
self._next = next(self._lines)
self._index += 1
except StopIteration:
self._next = StopIteration
return value
@property
def peek(self):
if self._next == StopIteration:
raise ValueError
else:
return self._next
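# A minimal usage sketch (illustrative only, not part of the original module):
# the helper name and the sample lines are assumptions showing how `LineIter`
# is driven by `ExampleDocDirective` below.
def _example_lineiter_usage():
  lines = LineIter(['#!/usr/bin/env python3\n', '# a doc line\n', 'code = 1\n'])
  if lines and lines.peek.startswith('#!'):
    next(lines)  # skip the shebang, exactly as the directive does
  collected = []
  while lines:
    index, line = next(lines)  # yields (zero-based line number, line text)
    collected.append((index, line.rstrip('\n')))
  return collected  # [(1, '# a doc line'), (2, 'code = 1')]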
class ExampleDocDirective(docutils.parsers.rst.Directive):
has_content = False
required_arguments = 1
  optional_arguments = 0
@staticmethod
def _isdocline(line):
line = line.lstrip()
return line.rstrip() == '#' or line.startswith('# ')
def run(self):
logger = sphinx.util.logging.getLogger(__name__)
nodes = []
src = project_root/self.arguments[0]
with src.open('r', encoding='utf-8') as f:
prevtype = None
lines = LineIter(f)
if lines and lines.peek.startswith('#!'):
next(lines)
while lines:
if lines.peek.rstrip('\n') == '':
next(lines)
elif self._isdocline(lines.peek):
# Collect all doc lines.
contents = docutils.statemachine.ViewList()
while lines and self._isdocline(lines.peek):
i, line = next(lines)
contents.append(line.lstrip()[2:], self.arguments[0], i)
# Parse as rst into `node`.
with sphinx.util.docutils.switch_source_input(self.state, contents):
node = docutils.nodes.container()
self.state.nested_parse(contents, 0, node)
# Process sh roles. Add links to logs.
for sh_node in node.traverse(docutils.nodes.literal):
if 'nutils_sh' not in sh_node:
continue
cmdline = sh_node.get('nutils_sh')
cmdline_parts = tuple(shlex.split(cmdline))
if cmdline_parts[:2] != ('python3', src.name):
logger.warning('Not creating a log for {}.'.format(cmdline))
continue
log_link = sphinx.addnodes.only(expr='html')
log_link.append(docutils.nodes.inline('', ' '))
xref = sphinx.addnodes.pending_xref('', reftype='nutils-log', refdomain='std', reftarget=cmdline_parts[2:], script=src)
xref += docutils.nodes.inline('', '(view log)', classes=['nutils-log-link'])
log_link += xref
sh_node.parent.insert(sh_node.parent.index(sh_node)+1, log_link)
nodes.extend(node.children)
else:
# Collect all source lines.
istart, line = next(lines)
contents = [line]
while lines and not self._isdocline(lines.peek):
i, line = next(lines)
contents.append(line)
# Remove trailing empty lines.
while contents and contents[-1].rstrip('\n') == '':
del contents[-1]
contents = ''.join(contents)
# Create literal block.
literal = docutils.nodes.literal_block(contents, contents)
literal['language'] = 'python3'
literal['linenos'] = True
literal['highlight_args'] = dict(linenostart=istart+1)
sphinx.util.nodes.set_source_info(self, literal)
nodes.append(literal)
return nodes
def role_sh(name, rawtext, text, lineno, inliner, options={}, context=[]):
return [docutils.nodes.literal('', text, nutils_sh=text)], []
def create_log(app, env, node, contnode):
logger = sphinx.util.logging.getLogger(__name__)
if node['reftype'] == 'nutils-log':
script = node.get('script')
scriptname = str(script.relative_to(project_root))
cmdline_args = node['reftarget']
cmdline = ' '.join(map(shlex.quote, [scriptname, *cmdline_args]))
target = '_logs/{}/index'.format(urllib.parse.quote(cmdline, safe='').replace('%', '+'))
dst_log = (pathlib.Path(app.builder.outdir)/target).parent
if dst_log.exists() and dst_log.stat().st_mtime > script.stat().st_mtime:
logger.debug('Skip building log of {cmdline} because it already exists and '
'is newer than {script}. Please touch {script} to force a rebuild.'
.format(script=scriptname, cmdline=cmdline))
else:
if dst_log.exists():
logger.debug('purging old log files... {}'.format(dst_log))
shutil.rmtree(str(dst_log))
else:
dst_log.parent.mkdir(parents=True, exist_ok=True)
logger.info('creating log... {}'.format(cmdline))
script_dict = runpy.run_path(str(script), run_name='__log_builder__')
# Parse cmdline.
func = script_dict['main']
params = inspect.signature(func).parameters
doc = stringly.util.DocString(func)
kwargs = doc.defaults.copy()
kwargs.update(arg.split('=', 1) for arg in cmdline_args if arg)
# Run script.
import matplotlib.testing
matplotlib.testing.setup()
with nutils.cli._htmllog(outdir=str(dst_log), scriptname=scriptname, kwargs=[(name, kwargs[name], doc.argdocs[name]) for name in params]) as log, treelog.set(log), nutils.matrix.backend('scipy'), nutils.warnings.via(treelog.warning):
func(**{name: stringly.loads(params[name].annotation, kwargs[name]) for name in params})
(dst_log/'log.html').rename(dst_log/'index.html')
refnode = docutils.nodes.reference('', '', internal=False, refuri=app.builder.get_relative_uri(env.docname, target))
refnode.append(contnode)
return refnode
def generate_api(app):
nutils = project_root/'nutils'
dst_root = pathlib.Path(app.srcdir)/'nutils'
dst_root.mkdir(parents=True, exist_ok=True)
srcs = tuple(f for f in sorted(nutils.glob('**/*.py')) if f != nutils/'__init__.py' and (f.name == '__init__.py' or not f.name.startswith('_')))
for src in sphinx.util.status_iterator(srcs, 'generating api... ', 'purple', len(srcs), app.verbosity):
module = '.'.join((src.parent if src.name == '__init__.py' else src.with_suffix('')).relative_to(nutils).parts)
dst = dst_root/(module+'.rst')
with dst.open('w', encoding='utf-8') as f:
print_rst_autogen_header(file=f, src=src)
print_rst_h1(module, file=f)
print('.. automodule:: {}'.format('nutils.{}'.format(module)), file=f)
copy_utime(src, dst)
def remove_generated(app, exception):
logger = sphinx.util.logging.getLogger(__name__)
for name in 'nutils', 'examples':
generated = pathlib.Path(app.srcdir)/name
shutil.rmtree(str(generated), onerror=lambda f, p, e: logger.warning('failed to remove {}'.format(p)))
class RequiresNode(docutils.nodes.Admonition, docutils.nodes.TextElement): pass
def html_visit_requires(self, node):
self.body.append(self.starttag(node, 'div', CLASS='requires'))
def html_depart_requires(self, node):
self.body.append('</div>\n')
def text_visit_requires(self, node):
self.new_state(0)
def text_depart_requires(self, node):
self.end_state()
class RequiresDirective(docutils.parsers.rst.Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
def run(self):
requires = tuple(name.strip() for name in self.arguments[0].split(','))
node = RequiresNode('requires')
node.document = self.state.document
sphinx.util.nodes.set_source_info(self, node)
msg = 'Requires {}.'.format(', '.join(requires))
node.append(docutils.nodes.paragraph('', docutils.nodes.Text(msg, msg), translatable=False))
return [node]
class ConsoleDirective(docutils.parsers.rst.Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
info = treelog.proto.Level.info if hasattr(treelog, 'proto') else 1
_console_log = treelog.FilterLog(treelog.StdoutLog(), minlevel=info)
def run(self):
document = self.state.document
env = document.settings.env
nodes = []
indent = min(len(line)-len(line.lstrip()) for line in self.content)
code = ''.join(line[indent:]+'\n' for line in self.content)
code_wo_spread = nutils.testing.FloatNeighborhoodOutputChecker.re_spread.sub(lambda m: m.group(0).split('±', 1)[0], code)
literal = docutils.nodes.literal_block(code_wo_spread, code_wo_spread, classes=['console'])
literal['language'] = 'python3'
literal['linenos'] = False
sphinx.util.nodes.set_source_info(self, literal)
nodes.append(literal)
import matplotlib.testing
matplotlib.testing.setup()
import matplotlib.pyplot
parser = doctest.DocTestParser()
runner = doctest.DocTestRunner(checker=nutils.testing.FloatNeighborhoodOutputChecker(), optionflags=doctest.ELLIPSIS)
globs = getattr(document, '_console_globs', {})
test = parser.get_doctest(code, globs, 'test', env.docname, self.lineno)
with treelog.set(self._console_log):
failures, tries = runner.run(test, clear_globs=False)
for fignum in matplotlib.pyplot.get_fignums():
fig = matplotlib.pyplot.figure(fignum)
with io.BytesIO() as f:
fig.savefig(f, format='svg')
name = hashlib.sha1(f.getvalue()).hexdigest()+'.svg'
uri = 'data:image/svg+xml;base64,{}'.format(base64.b64encode(f.getvalue()).decode())
nodes.append(docutils.nodes.image('', uri=uri, alt='image generated by matplotlib'))
matplotlib.pyplot.close('all')
if failures:
document.reporter.warning('doctest failed', line=self.lineno)
document._console_globs = test.globs
return nodes
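# A minimal sketch of the directive's input (hypothetical rst source):
#
#     .. console::
#
#        >>> 1 + 1
#        2
#
# The body is executed through doctest with a FloatNeighborhoodOutputChecker,
# any matplotlib figures created along the way are embedded inline as base64
# SVG data URIs, and the doctest globals persist across console blocks within
# the same document (they are dropped again in remove_console_globs below).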
def remove_console_globs(app, doctree):
if hasattr(doctree, '_console_globs'):
del doctree._console_globs
def fix_testcase_reference(app, env, node, contnode):
if node['reftarget'] == 'unittest.case.TestCase':
node = node.deepcopy()
node['reftarget'] = 'unittest.TestCase'
return app.emit_firstresult('missing-reference', env, node, contnode)
def setup(app):
app.connect('autodoc-process-signature', process_signature)
app.connect('builder-inited', generate_api)
app.connect('builder-inited', generate_examples)
app.add_directive('exampledoc', ExampleDocDirective)
app.add_role('sh', role_sh)
app.connect('missing-reference', create_log)
app.add_node(RequiresNode,
html=(html_visit_requires, html_depart_requires),
text=(text_visit_requires, text_depart_requires))
app.add_directive('requires', RequiresDirective)
app.add_directive('console', ConsoleDirective)
app.connect('doctree-read', remove_console_globs)
app.connect('build-finished', remove_generated)
app.connect('missing-reference', fix_testcase_reference)
if sphinx.version_info >= (1,8):
app.add_css_file('mods.css')
else:
app.add_stylesheet('mods.css')
# vim: sts=2:sw=2:et
| mit |
epfl-lts2/pygsp | pygsp/graphs/nngraphs/cube.py | 1 | 3294 | # -*- coding: utf-8 -*-
import numpy as np
from pygsp.graphs import NNGraph # prevent circular import in Python < 3.5
class Cube(NNGraph):
r"""Hyper-cube (NN-graph).
Parameters
----------
radius : float
Edge length (default = 1)
nb_pts : int
Number of vertices (default = 300)
nb_dim : int
Dimension (default = 3)
sampling : string
Sampling strategy (default = 'random').
(Currently only 'random' is supported.)
seed : int
Seed for the random number generator (for reproducible graphs).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> G = graphs.Cube(seed=42)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121)
>>> ax2 = fig.add_subplot(122, projection='3d')
>>> _ = ax1.spy(G.W, markersize=0.5)
>>> _ = G.plot(ax=ax2)
"""
def __init__(self,
radius=1,
nb_pts=300,
nb_dim=3,
sampling='random',
seed=None,
**kwargs):
self.radius = radius
self.nb_pts = nb_pts
self.nb_dim = nb_dim
self.sampling = sampling
self.seed = seed
rs = np.random.RandomState(seed)
if self.nb_dim > 3:
raise NotImplementedError("Dimension > 3 not supported yet!")
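# The 3-D case below draws nb_pts // 6 points uniformly on each of the six
# faces of the unit cube (x=0, x=1, y=0, y=1, z=0 and z=1, in that order),
# so 6 * (nb_pts // 6) points in total; the NNGraph constructor at the end
# then connects them with a k=10 nearest-neighbour graph.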
if self.sampling == "random":
if self.nb_dim == 2:
pts = rs.rand(self.nb_pts, self.nb_dim)
elif self.nb_dim == 3:
n = self.nb_pts // 6
pts = np.zeros((n*6, 3))
pts[:n, 1:] = rs.rand(n, 2)
pts[n:2*n, :] = np.concatenate((np.ones((n, 1)),
rs.rand(n, 2)),
axis=1)
pts[2*n:3*n, :] = np.concatenate((rs.rand(n, 1),
np.zeros((n, 1)),
rs.rand(n, 1)),
axis=1)
pts[3*n:4*n, :] = np.concatenate((rs.rand(n, 1),
np.ones((n, 1)),
rs.rand(n, 1)),
axis=1)
pts[4*n:5*n, :2] = rs.rand(n, 2)
pts[5*n:6*n, :] = np.concatenate((rs.rand(n, 2),
np.ones((n, 1))),
axis=1)
else:
raise ValueError("Unknown sampling !")
plotting = {
'vertex_size': 80,
'elevation': 15,
'azimuth': 0,
'distance': 9,
}
super(Cube, self).__init__(Xin=pts, k=10,
center=False, rescale=False,
plotting=plotting, **kwargs)
def _get_extra_repr(self):
attrs = {'radius': '{:.2f}'.format(self.radius),
'nb_pts': self.nb_pts,
'nb_dim': self.nb_dim,
'sampling': self.sampling,
'seed': self.seed}
attrs.update(super(Cube, self)._get_extra_repr())
return attrs
| bsd-3-clause |
ilyes14/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
gfyoung/pandas | pandas/core/dtypes/missing.py | 2 | 18331 | """
missing types & inference
"""
from functools import partial
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import NaT, Period, iNaT
from pandas._typing import ArrayLike, DtypeObj
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
TD64NS_DTYPE,
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
is_string_like_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
ABCIndex,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar
nan_checker = np.isnan
INF_AS_NA = False
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
notna : Boolean inverse of pandas.isna.
Series.isna : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.isna('dog')
False
>>> pd.isna(pd.NA)
True
>>> pd.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.isna(array)
array([[False, True, False],
[False, False, True]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.isna(index)
array([False, False, True, False])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.isna(df)
0 1 2
0 False False False
1 False True False
>>> pd.isna(df[1])
0 False
1 True
Name: 1, dtype: bool
"""
return _isna(obj)
isnull = isna
def _isna(obj, inf_as_na: bool = False):
"""
Detect missing values, treating None, NaN or NA as null. Infinite
values will also be treated as null if inf_as_na is True.
Parameters
----------
obj: ndarray or object value
Input array or scalar value.
inf_as_na: bool
Whether to treat infinity as null.
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
if inf_as_na:
return libmissing.checknull_old(obj)
else:
return libmissing.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, type):
return False
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)):
return _isna_ndarraylike(obj, inf_as_na=inf_as_na)
elif isinstance(obj, ABCDataFrame):
return obj.isna()
elif isinstance(obj, list):
return _isna_ndarraylike(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
elif hasattr(obj, "__array__"):
return _isna_ndarraylike(np.asarray(obj), inf_as_na=inf_as_na)
else:
return False
def _use_inf_as_na(key):
"""
Option change callback for na/inf behaviour.
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
key : str
Name of the option being set. If the option's value is True, treat
None, NaN, INF, -INF as null (old way); if False, None and NaN are
null, but INF, -INF are not (new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* https://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
inf_as_na = get_option(key)
globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na)
if inf_as_na:
globals()["nan_checker"] = lambda x: ~np.isfinite(x)
globals()["INF_AS_NA"] = True
else:
globals()["nan_checker"] = np.isnan
globals()["INF_AS_NA"] = False
def _isna_ndarraylike(obj, inf_as_na: bool = False):
"""
Return an array indicating which values of the input array are NaN / NA.
Parameters
----------
obj: array-like
The input array whose elements are to be checked.
inf_as_na: bool
Whether or not to treat infinite values as NA.
Returns
-------
array-like
Array of boolean values denoting the NA status of each element.
"""
values = getattr(obj, "_values", obj)
dtype = values.dtype
if is_extension_array_dtype(dtype):
if inf_as_na and is_categorical_dtype(dtype):
result = libmissing.isnaobj_old(values.to_numpy())
else:
result = values.isna()
elif is_string_dtype(dtype):
result = _isna_string_dtype(values, dtype, inf_as_na=inf_as_na)
elif needs_i8_conversion(dtype):
# this is the NaT pattern
result = values.view("i8") == iNaT
else:
if inf_as_na:
result = ~np.isfinite(values)
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
return result
def _isna_string_dtype(
values: np.ndarray, dtype: np.dtype, inf_as_na: bool
) -> np.ndarray:
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
if inf_as_na:
vec = libmissing.isnaobj_old(values.ravel())
else:
vec = libmissing.isnaobj(values.ravel())
result[...] = vec.reshape(shape)
return result
def notna(obj):
"""
Detect non-missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are valid (not missing, which is ``NaN`` in numeric
arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike).
Parameters
----------
obj : array-like or object value
Object to check for *not* null or *non*-missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is valid.
See Also
--------
isna : Boolean inverse of pandas.notna.
Series.notna : Detect valid values in a Series.
DataFrame.notna : Detect valid values in a DataFrame.
Index.notna : Detect valid values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> pd.notna('dog')
True
>>> pd.notna(pd.NA)
False
>>> pd.notna(np.nan)
False
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> pd.notna(array)
array([[ True, False, True],
[ True, True, False]])
For indexes, an ndarray of booleans is returned.
>>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None,
... "2017-07-08"])
>>> index
DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'],
dtype='datetime64[ns]', freq=None)
>>> pd.notna(index)
array([ True, True, False, True])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df
0 1 2
0 ant bee cat
1 dog None fly
>>> pd.notna(df)
0 1 2
0 True True True
1 True False True
>>> pd.notna(df[1])
0 True
1 False
Name: 1, dtype: bool
"""
res = isna(obj)
if is_scalar(res):
return not res
return ~res
notnull = notna
def isna_compat(arr, fill_value=np.nan) -> bool:
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
if isna(fill_value):
dtype = arr.dtype
return not (is_bool_dtype(dtype) or is_integer_dtype(dtype))
return True
def array_equivalent(
left, right, strict_nan: bool = False, dtype_equal: bool = False
) -> bool:
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
dtype_equal : bool, default False
Whether `left` and `right` are known to have the same dtype
according to `is_dtype_equal`. Some methods like `BlockManager.equals`
require that the dtypes match. Setting this to ``True`` can improve
performance, but will give different results for arrays that are
equal but different dtypes.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
if dtype_equal:
# fastpath when we require that the dtypes match (Block.equals)
if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
return _array_equivalent_float(left, right)
elif is_datetimelike_v_numeric(left.dtype, right.dtype):
return False
elif needs_i8_conversion(left.dtype):
return _array_equivalent_datetimelike(left, right)
elif is_string_dtype(left.dtype):
# TODO: fastpath for pandas' StringDtype
return _array_equivalent_object(left, right, strict_nan)
else:
return np.array_equal(left, right)
# Slow path when we allow comparing different dtypes.
# Object arrays can contain None, NaN and NaT.
# string dtypes must come to this path for NumPy 1.7.1 compat
if is_string_dtype(left.dtype) or is_string_dtype(right.dtype):
return _array_equivalent_object(left, right, strict_nan)
# NaNs can occur in float and complex arrays.
if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
if not (np.prod(left.shape) and np.prod(right.shape)):
return True
return ((left == right) | (isna(left) & isna(right))).all()
elif is_datetimelike_v_numeric(left, right):
# GH#29553 avoid numpy deprecation warning
return False
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# datetime64, timedelta64, Period
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view("i8")
right = right.view("i8")
# if we have structured dtypes, compare first
if (
left.dtype.type is np.void or right.dtype.type is np.void
) and left.dtype != right.dtype:
return False
return np.array_equal(left, right)
def _array_equivalent_float(left, right):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
def _array_equivalent_datetimelike(left, right):
return np.array_equal(left.view("i8"), right.view("i8"))
def _array_equivalent_object(left, right, strict_nan):
if not strict_nan:
# isna considers NaN and None to be equivalent.
return lib.array_equivalent_object(
ensure_object(left.ravel()), ensure_object(right.ravel())
)
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif left_value is libmissing.NA and right_value is not libmissing.NA:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if not isinstance(right_value, float) or not np.isnan(right_value):
return False
else:
try:
if np.any(np.asarray(left_value != right_value)):
return False
except TypeError as err:
if "Cannot compare tz-naive" in str(err):
# tzawareness compat failure, see GH#28507
return False
elif "boolean value of NA is ambiguous" in str(err):
return False
raise
return True
def array_equals(left: ArrayLike, right: ArrayLike) -> bool:
"""
ExtensionArray-compatible implementation of array_equivalent.
"""
if not is_dtype_equal(left.dtype, right.dtype):
return False
elif isinstance(left, ABCExtensionArray):
return left.equals(right)
else:
return array_equivalent(left, right, dtype_equal=True)
def infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like if we are a NaT, return the correct dtyped
element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if needs_i8_conversion(val.dtype):
return np.array("NaT", dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ["datetime", "datetime64"]:
return np.array("NaT", dtype=DT64NS_DTYPE)
elif dtype in ["timedelta", "timedelta64"]:
return np.array("NaT", dtype=TD64NS_DTYPE)
return np.nan
def maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype, compat: bool = True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : bool, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if needs_i8_conversion(dtype):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
if compat:
return False
return np.nan
return np.nan
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
if is_extension_array_dtype(arr):
return arr[notna(arr)]
else:
return arr[notna(np.asarray(arr))]
def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool:
"""
isna check that excludes incompatible dtypes
Parameters
----------
obj : object
dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype
Returns
-------
bool
"""
if not lib.is_scalar(obj) or not isna(obj):
return False
if dtype.kind == "M":
return not isinstance(obj, np.timedelta64)
if dtype.kind == "m":
return not isinstance(obj, np.datetime64)
if dtype.kind in ["i", "u", "f", "c"]:
# Numeric
return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64))
# must be PeriodDtype
return not isinstance(obj, (np.datetime64, np.timedelta64))
def isna_all(arr: ArrayLike) -> bool:
"""
Optimized equivalent to isna(arr).all()
"""
total_len = len(arr)
# Usually it's enough to check only a small fraction of values to see if
# a block is NOT null; checking in chunks should help in such cases.
# parameters 1000 and 40 were chosen arbitrarily
chunk_len = max(total_len // 40, 1000)
dtype = arr.dtype
if dtype.kind == "f":
checker = nan_checker
elif dtype.kind in ["m", "M"] or dtype.type is Period:
checker = lambda x: np.asarray(x.view("i8")) == iNaT
else:
checker = lambda x: _isna_ndarraylike(x, inf_as_na=INF_AS_NA)
return all(
checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len)
)
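# A worked illustration of the chunking above: for an array of 1_000_000
# float64 values, chunk_len = max(1_000_000 // 40, 1000) = 25_000, so at most
# 40 chunks are inspected, and the generator passed to ``all`` short-circuits
# on the first chunk that contains a non-NA value.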
| bsd-3-clause |
zorroblue/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 12 | 4320 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared
distances to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates one single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum), with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting
# the number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
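# A worked instance of the check above: with n_population = 10 and
# n_samples = 5 there are C(10, 5) = 252 distinct subsets, so a uniform
# sampler is expected to produce all of them well within the 10000 trials
# (coupon-collector estimate of roughly 252 * ln(252) ~ 1400 draws), while a
# biased sampler is likely to trip the AssertionError above.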
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 43 | 24671 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from numpy.testing import run_module_suite
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (
chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
SelectFdr, SelectFwe, GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
# Smoke test f_oneway on integers: that it does not raise casting errors
# with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
# test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
def test_mutual_info_classif():
X, y = make_classification(n_samples=100, n_features=5,
n_informative=1, n_redundant=1,
n_repeated=0, n_classes=2,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_classif, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(5)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
def test_mutual_info_regression():
X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
shuffle=False, random_state=0, noise=10)
# Test in KBest mode.
univariate_filter = SelectKBest(mutual_info_regression, k=2)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
# Test in Percentile mode.
univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
param=20).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(10)
gtruth[:2] = 1
assert_array_equal(support, gtruth)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
KaiWeiChang/vowpal_wabbit | utl/vw-hyperopt.py | 7 | 15612 | #!/usr/bin/env python
# coding: utf-8
"""
Github version of hyperparameter optimization for Vowpal Wabbit via hyperopt
"""
__author__ = 'kurtosis'
from hyperopt import hp, fmin, tpe, rand, Trials, STATUS_OK
from sklearn.metrics import roc_curve, auc, log_loss, precision_recall_curve
import numpy as np
from datetime import datetime as dt
import subprocess, shlex
from math import exp, log
import argparse
import re
import logging
import json
import matplotlib
from matplotlib import pyplot as plt
try:
import seaborn as sns
except ImportError:
print ("Warning: seaborn is not installed. "
"Without seaborn, standard matplotlib plots will not look very charming. "
"It's recommended to install it via pip install seaborn")
def read_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--searcher', type=str, default='tpe', choices=['tpe', 'rand'])
parser.add_argument('--max_evals', type=int, default=100)
parser.add_argument('--train', type=str, required=True, help="training set")
parser.add_argument('--holdout', type=str, required=True, help="holdout set")
parser.add_argument('--vw_space', type=str, required=True, help="hyperparameter search space (must be 'quoted')")
parser.add_argument('--outer_loss_function', default='logistic',
choices=['logistic', 'roc-auc']) # TODO: implement squared, hinge, quantile, PR-auc
parser.add_argument('--regression', action='store_true', default=False, help="""regression (continuous class labels)
or classification (-1 or 1, default value).""")
parser.add_argument('--plot', action='store_true', default=False, help=("Plot the results in the end. "
"Requires matplotlib and "
"(optionally) seaborn to be installed."))
args = parser.parse_args()
return args
class HyperoptSpaceConstructor(object):
"""
Takes command-line input and transforms it into hyperopt search space
An example of command-line input:
--algorithms=ftrl,sgd --l2=1e-8..1e-4~LO -l=0.01..10~L --ftrl_beta=0.01..1 --passes=1..10~I -q=SE+SZ+DR,SE~O
"""
def __init__(self, command):
self.command = command
self.space = None
self.algorithm_metadata = {
'ftrl': {'arg': '--ftrl', 'prohibited_flags': set()},
'sgd': {'arg': '', 'prohibited_flags': {'--ftrl_alpha', '--ftrl_beta'}}
}
self.range_pattern = re.compile("[^~]+") # re.compile("(?<=\[).+(?=\])")
self.distr_pattern = re.compile("(?<=~)[IOL]*") # re.compile("(?<=\])[IOL]*")
self.only_continuous = re.compile("(?<=~)[IL]*") # re.compile("(?<=\])[IL]*")
def _process_vw_argument(self, arg, value, algorithm):
try:
distr_part = self.distr_pattern.findall(value)[0]
except IndexError:
distr_part = ''
range_part = self.range_pattern.findall(value)[0]
is_continuous = '..' in range_part
ocd = self.only_continuous.findall(value)
if not is_continuous and len(ocd) > 0 and ocd[0] != '':
raise ValueError(("Need a range instead of a list of discrete values to define "
"uniform or log-uniform distribution. "
"Please, use [min..max]%s form") % (distr_part))
if is_continuous and arg == '-q':
raise ValueError(("You must directly specify namespaces for quadratic features "
"as a list of values, not as a parametric distribution"))
hp_choice_name = "_".join([algorithm, arg.replace('-', '')])
try_omit_zero = 'O' in distr_part
distr_part = distr_part.replace('O', '')
if is_continuous:
vmin, vmax = [float(i) for i in range_part.split('..')]
if distr_part == 'L':
distrib = hp.loguniform(hp_choice_name, log(vmin), log(vmax))
elif distr_part == '':
distrib = hp.uniform(hp_choice_name, vmin, vmax)
elif distr_part == 'I':
distrib = hp.quniform(hp_choice_name, vmin, vmax, 1)
elif distr_part in {'LI', 'IL'}:
distrib = hp.qloguniform(hp_choice_name, log(vmin), log(vmax), 1)
else:
raise ValueError("Cannot recognize distribution: %s" % (distr_part))
else:
possible_values = range_part.split(',')
if arg == '-q':
possible_values = [v.replace('+', ' -q ') for v in possible_values]
distrib = hp.choice(hp_choice_name, possible_values)
if try_omit_zero:
hp_choice_name_outer = hp_choice_name + '_outer'
distrib = hp.choice(hp_choice_name_outer, ['omit', distrib])
return distrib
def string_to_pyll(self):
line = shlex.split(self.command)
algorithms = ['sgd']
for arg in line:
arg, value = arg.split('=')
if arg == '--algorithms':
algorithms = set(self.range_pattern.findall(value)[0].split(','))
if tuple(self.distr_pattern.findall(value)) not in {(), ('O',)}:
raise ValueError(("Distribution options are prohibited for --algorithms flag. "
"Simply list the algorithms instead (like --algorithms=ftrl,sgd)"))
elif self.distr_pattern.findall(value) == ['O']:
algorithms.add('sgd')
for algo in algorithms:
if algo not in self.algorithm_metadata:
raise NotImplementedError(("%s algorithm is not found. "
"Supported algorithms by now are %s")
% (algo, str(self.algorithm_metadata.keys())))
break
self.space = {algo: {'type': algo, 'argument': self.algorithm_metadata[algo]['arg']} for algo in algorithms}
for algo in algorithms:
for arg in line:
arg, value = arg.split('=')
if arg == '--algorithms':
continue
if arg not in self.algorithm_metadata[algo]['prohibited_flags']:
distrib = self._process_vw_argument(arg, value, algo)
self.space[algo][arg] = distrib
else:
pass
self.space = hp.choice('algorithm', self.space.values())
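# A minimal sketch, assuming the conventions implemented by
# HyperoptSpaceConstructor above ('L' = log-uniform, 'I' = integer-quantized,
# comma-separated values = discrete choice): how a few flag specs from the
# mini-language map onto hyperopt distributions. The helper name and labels
# below are hypothetical and for illustration only.
def _example_flag_to_distribution():
    from math import log
    from hyperopt import hp
    # "--l2=1e-8..1e-4~L" -> continuous range with a log-uniform prior
    l2 = hp.loguniform('sgd_l2', log(1e-8), log(1e-4))
    # "--passes=1..10~I"  -> uniform range quantized to integer steps
    passes = hp.quniform('sgd_passes', 1, 10, 1)
    # "-q=SE+SZ,SE"       -> discrete choice ('SE+SZ' becomes 'SE -q SZ')
    q = hp.choice('sgd_q', ['SE -q SZ', 'SE'])
    return {'--l2': l2, '--passes': passes, '-q': q}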
class HyperOptimizer(object):
def __init__(self, train_set, holdout_set, command, max_evals=100,
outer_loss_function='logistic',
searcher='tpe', is_regression=False):
self.train_set = train_set
self.holdout_set = holdout_set
self.train_model = './current.model'
self.holdout_pred = './holdout.pred'
self.trials_output = './trials.json'
self.hyperopt_progress_plot = './hyperopt_progress.png'
self.log = './log.log'
self.logger = self._configure_logger()
# hyperopt parameter sample, converted into a string with flags
self.param_suffix = None
self.train_command = None
self.validate_command = None
self.y_true_train = []
self.y_true_holdout = []
self.outer_loss_function = outer_loss_function
self.space = self._get_space(command)
self.max_evals = max_evals
self.searcher = searcher
self.is_regression = is_regression
self.trials = Trials()
self.current_trial = 0
def _get_space(self, command):
hs = HyperoptSpaceConstructor(command)
hs.string_to_pyll()
return hs.space
def _configure_logger(self):
LOGGER_FORMAT = "%(asctime)s,%(msecs)03d %(levelname)-8s [%(name)s/%(module)s:%(lineno)d]: %(message)s"
LOGGER_DATEFMT = "%Y-%m-%d %H:%M:%S"
LOGFILE = self.log
logging.basicConfig(format=LOGGER_FORMAT,
datefmt=LOGGER_DATEFMT,
level=logging.DEBUG)
formatter = logging.Formatter(LOGGER_FORMAT, datefmt=LOGGER_DATEFMT)
file_handler = logging.FileHandler(LOGFILE)
file_handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(file_handler)
return logger
def get_hyperparam_string(self, **kwargs):
for arg in ['--passes']: #, '--rank', '--lrq']:
if arg in kwargs:
kwargs[arg] = int(kwargs[arg])
#print 'KWARGS: ', kwargs
flags = [key for key in kwargs if key.startswith('-')]
for flag in flags:
if kwargs[flag] == 'omit':
del kwargs[flag]
self.param_suffix = ' '.join(['%s %s' % (key, kwargs[key]) for key in kwargs if key.startswith('-')])
self.param_suffix += ' %s' % (kwargs['argument'])
def compose_vw_train_command(self):
data_part = ('vw -d %s -f %s --holdout_off -c '
% (self.train_set, self.train_model))
self.train_command = ' '.join([data_part, self.param_suffix])
def compose_vw_validate_command(self):
data_part = 'vw -t -d %s -i %s -p %s --holdout_off -c' \
% (self.holdout_set, self.train_model, self.holdout_pred)
self.validate_command = data_part
def fit_vw(self):
self.compose_vw_train_command()
self.logger.info("executing the following command (training): %s" % self.train_command)
subprocess.call(shlex.split(self.train_command))
def validate_vw(self):
self.compose_vw_validate_command()
self.logger.info("executing the following command (validation): %s" % self.validate_command)
subprocess.call(shlex.split(self.validate_command))
def get_y_true_train(self):
self.logger.info("loading true train class labels...")
yh = open(self.train_set, 'r')
self.y_true_train = []
for line in yh:
self.y_true_train.append(int(line.strip()[0:2]))
if not self.is_regression:
self.y_true_train = [(i + 1.) / 2 for i in self.y_true_train]
self.logger.info("train length: %d" % len(self.y_true_train))
def get_y_true_holdout(self):
self.logger.info("loading true holdout class labels...")
yh = open(self.holdout_set, 'r')
self.y_true_holdout = []
for line in yh:
self.y_true_holdout.append(int(line.strip()[0:2]))
if not self.is_regression:
self.y_true_holdout = [(i + 1.) / 2 for i in self.y_true_holdout]
self.logger.info("holdout length: %d" % len(self.y_true_holdout))
def validation_metric_vw(self):
v = open('%s' % self.holdout_pred, 'r')
y_pred_holdout = []
for line in v:
y_pred_holdout.append(float(line.strip()))
if self.outer_loss_function == 'logistic':
y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
loss = log_loss(self.y_true_holdout, y_pred_holdout_proba)
elif self.outer_loss_function == 'squared': # TODO: write it
pass
elif self.outer_loss_function == 'hinge': # TODO: write it
pass
elif self.outer_loss_function == 'roc-auc':
y_pred_holdout_proba = [1. / (1 + exp(-i)) for i in y_pred_holdout]
fpr, tpr, _ = roc_curve(self.y_true_holdout, y_pred_holdout_proba)
loss = -auc(fpr, tpr)
self.logger.info('parameter suffix: %s' % self.param_suffix)
self.logger.info('loss value: %.6f' % loss)
return loss
def hyperopt_search(self, parallel=False): # TODO: implement parallel search with MongoTrials
def objective(kwargs):
start = dt.now()
self.current_trial += 1
self.logger.info('\n\nStarting trial no.%d' % self.current_trial)
self.get_hyperparam_string(**kwargs)
self.fit_vw()
self.validate_vw()
loss = self.validation_metric_vw()
finish = dt.now()
elapsed = finish - start
self.logger.info("evaluation time for this step: %s" % str(elapsed))
# clean up
subprocess.call(shlex.split('rm %s %s' % (self.train_model, self.holdout_pred)))
to_return = {'status': STATUS_OK,
'loss': loss, # TODO: include also train loss tracking in order to prevent overfitting
'eval_time': elapsed.seconds,
'train_command': self.train_command,
'current_trial': self.current_trial
}
return to_return
self.trials = Trials()
if self.searcher == 'tpe':
algo = tpe.suggest
elif self.searcher == 'rand':
algo = rand.suggest
logging.debug("starting hypersearch...")
best_params = fmin(objective, space=self.space, trials=self.trials, algo=algo, max_evals=self.max_evals)
self.logger.debug("the best hyperopt parameters: %s" % str(best_params))
json.dump(self.trials.results, open(self.trials_output, 'w'))
self.logger.info('All the trials results are saved at %s' % self.trials_output)
best_configuration = self.trials.results[np.argmin(self.trials.losses())]['train_command']
best_loss = self.trials.results[np.argmin(self.trials.losses())]['loss']
self.logger.info("\n\nA full training command with the best hyperparameters: \n%s\n\n" % best_configuration)
self.logger.info("\n\nThe best holdout loss value: \n%s\n\n" % best_loss)
return best_configuration, best_loss
def plot_progress(self):
try:
sns.set_palette('Set2')
sns.set_style("darkgrid", {"axes.facecolor": ".95"})
except:
pass
self.logger.debug('plotting...')
plt.figure(figsize=(15,10))
plt.subplot(211)
plt.plot(self.trials.losses(), '.', markersize=12)
plt.title('Per-Iteration Outer Loss', fontsize=16)
plt.ylabel('Outer loss function value')
if self.outer_loss_function in ['logistic']:
plt.yscale('log')
xticks = [int(i) for i in np.linspace(plt.xlim()[0], plt.xlim()[1], min(len(self.trials.losses()), 11))]
plt.xticks(xticks, xticks)
plt.subplot(212)
plt.plot(np.minimum.accumulate(self.trials.losses()), '.', markersize=12)
plt.title('Cumulative Minimum Outer Loss', fontsize=16)
plt.xlabel('Iteration number')
plt.ylabel('Outer loss function value')
xticks = [int(i) for i in np.linspace(plt.xlim()[0], plt.xlim()[1], min(len(self.trials.losses()), 11))]
plt.xticks(xticks, xticks)
plt.tight_layout()
plt.savefig(self.hyperopt_progress_plot)
self.logger.info('The diagnostic hyperopt progress plot is saved: %s' % self.hyperopt_progress_plot)
def main():
args = read_arguments()
h = HyperOptimizer(train_set=args.train, holdout_set=args.holdout, command=args.vw_space,
max_evals=args.max_evals,
outer_loss_function=args.outer_loss_function,
searcher=args.searcher, is_regression=args.regression)
h.get_y_true_holdout()
h.hyperopt_search()
if args.plot:
h.plot_progress()
if __name__ == '__main__':
main() | bsd-3-clause |
rishikksh20/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
beepee14/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
ephes/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
phoenixstar7/pmtk3 | python/demos/ch02/robustDemo.py | 7 | 1150 | #!/usr/bin/env python
import numpy as np
import matplotlib.pylab as pl
from scipy.stats import t, laplace, norm
a = np.random.randn(30)
outliers = np.array([8, 8.75, 9.5])
pl.hist(a, 7, weights=[1.0 / 30] * 30, rwidth=0.8)
#fit without outliers
x = np.linspace(-5, 10, 500)
loc, scale = norm.fit(a)
n = norm.pdf(x, loc=loc, scale=scale)
loc, scale = laplace.fit(a)
l = laplace.pdf(x, loc=loc, scale=scale)
fd, loc, scale = t.fit(a)
s = t.pdf(x, fd, loc=loc, scale=scale)
pl.plot(x, n, 'k>',
x, s, 'r-',
x, l, 'b--')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('robustDemo_without_outliers.png')
#add the outliers
pl.figure()
pl.hist(a, 7, weights=[1.0 / 33] * 30, rwidth=0.8)
pl.hist(outliers, 3, weights=[1.0 / 33] * 3, rwidth=0.8)
aa = np.hstack((a, outliers))
loc, scale = norm.fit(aa)
n = norm.pdf(x, loc=loc, scale=scale)
loc, scale = laplace.fit(aa)
l = laplace.pdf(x, loc=loc, scale=scale)
fd, loc, scale = t.fit(aa)
t = t.pdf(x, fd, loc=loc, scale=scale)
pl.plot(x, n, 'k:',
x, t, 'r-',
x, l, 'b--')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('robustDemo_with_outliers.png')
pl.show()
| mit |
navigator8972/pylqr | pylqr_trajctrl.py | 1 | 8885 | """
LQR based trajectory controller
"""
from __future__ import print_function
try:
import jax.numpy as np
except ImportError:
import numpy as np
import pylqr
class PyLQR_TrajCtrl():
"""
Uses a second-order (double-integrator) system with acceleration as the control input.
The trajectory is specified either as a set of reference waypoints with associated
tracking weights, or as a general cost function (finite differences are used to
obtain the gradient & Hessian).
"""
def __init__(self, R=.01, dt=0.01, use_autograd=False):
#control penalty, smoothness of the trajectory
self.R_ = R
self.dt_ = dt
self.Q_vel_ratio_ = 10
#desired functions for plant dynamics and cost
self.plant_dyn_ = None
self.plant_dyn_dx_ = None
self.plant_dyn_du_ = None
self.cost_ = None
self.cost_dx_ = None
self.cost_du_ = None
self.cost_dxx_ = None
self.cost_duu_ = None
self.cost_dux_ = None
self.ilqr_ = None
self.use_autograd=use_autograd
return
def build_ilqr_general_solver(self, cost_func, n_dims=2, T=100):
#figure out dimension
self.T_ = T
self.n_dims_ = n_dims
#build dynamics, second-order linear dynamical system
self.A_ = np.eye(self.n_dims_*2)
self.A_[0:self.n_dims_, self.n_dims_:] = np.eye(self.n_dims_) * self.dt_
self.B_ = np.zeros((self.n_dims_*2, self.n_dims_))
self.B_[self.n_dims_:, :] = np.eye(self.n_dims_) * self.dt_
self.plant_dyn_ = lambda x, u, t, aux: np.dot(self.A_, x) + np.dot(self.B_, u)
self.plant_dyn_dx_ = lambda x, u, t, aux: self.A_
self.plant_dyn_du_ = lambda x, u, t, aux: self.B_
self.cost_ = cost_func
#build an iLQR solver based on given functions...
self.ilqr_ = pylqr.PyLQR_iLQRSolver(T=self.T_-1, plant_dyn=self.plant_dyn_, cost=self.cost_, use_autograd=self.use_autograd)
return
def build_ilqr_tracking_solver(self, ref_pnts, weight_mats):
#figure out dimension
self.T_ = len(ref_pnts)
self.n_dims_ = len(ref_pnts[0])
self.ref_array = np.copy(ref_pnts)
self.weight_array = [mat for mat in weight_mats]
#clone weight mats if there are not enough weight mats
for i in range(self.T_ - len(self.weight_array)):
self.weight_array.append(self.weight_array[-1])
#build dynamics, second-order linear dynamical system
self.A_ = np.eye(self.n_dims_*2)
self.A_[0:self.n_dims_, self.n_dims_:] = np.eye(self.n_dims_) * self.dt_
self.B_ = np.zeros((self.n_dims_*2, self.n_dims_))
self.B_[self.n_dims_:, :] = np.eye(self.n_dims_) * self.dt_
self.plant_dyn_ = lambda x, u, t, aux: np.dot(self.A_, x) + np.dot(self.B_, u)
#build cost functions, quadratic ones
def tmp_cost_func(x, u, t, aux):
err = x[0:self.n_dims_] - self.ref_array[t]
#autograd does not allow A.dot(B)
cost = np.dot(np.dot(err, self.weight_array[t]), err) + np.sum(u**2) * self.R_
if t > self.T_-1:
#regularize velocity for the termination point
#autograd does not allow self increment
cost = cost + np.sum(x[self.n_dims_:]**2) * self.R_ * self.Q_vel_ratio_
return cost
self.cost_ = tmp_cost_func
self.ilqr_ = pylqr.PyLQR_iLQRSolver(T=self.T_-1, plant_dyn=self.plant_dyn_, cost=self.cost_, use_autograd=self.use_autograd)
if not self.use_autograd:
self.plant_dyn_dx_ = lambda x, u, t, aux: self.A_
self.plant_dyn_du_ = lambda x, u, t, aux: self.B_
def tmp_cost_func_dx(x, u, t, aux):
err = x[0:self.n_dims_] - self.ref_array[t]
grad = np.concatenate([2*err.dot(self.weight_array[t]), np.zeros(self.n_dims_)])
if t > self.T_-1:
grad[self.n_dims_:] = grad[self.n_dims_:] + 2 * self.R_ * self.Q_vel_ratio_ * x[self.n_dims_:]
return grad
self.cost_dx_ = tmp_cost_func_dx
self.cost_du_ = lambda x, u, t, aux: 2 * self.R_ * u
def tmp_cost_func_dxx(x, u, t, aux):
hessian = np.zeros((2*self.n_dims_, 2*self.n_dims_))
hessian[0:self.n_dims_, 0:self.n_dims_] = 2 * self.weight_array[t]
if t > self.T_-1:
hessian[self.n_dims_:, self.n_dims_:] = 2 * np.eye(self.n_dims_) * self.R_ * self.Q_vel_ratio_
return hessian
self.cost_dxx_ = tmp_cost_func_dxx
self.cost_duu_ = lambda x, u, t, aux: 2 * self.R_ * np.eye(self.n_dims_)
self.cost_dux_ = lambda x, u, t, aux: np.zeros((self.n_dims_, 2*self.n_dims_))
#build an iLQR solver based on given functions...
self.ilqr_.plant_dyn_dx = self.plant_dyn_dx_
self.ilqr_.plant_dyn_du = self.plant_dyn_du_
self.ilqr_.cost_dx = self.cost_dx_
self.ilqr_.cost_du = self.cost_du_
self.ilqr_.cost_dxx = self.cost_dxx_
self.ilqr_.cost_duu = self.cost_duu_
self.ilqr_.cost_dux = self.cost_dux_
return
def synthesize_trajectory(self, x0, u_array=None, n_itrs=50, tol=1e-6, verbose=True):
if self.ilqr_ is None:
print('No iLQR solver has been prepared.')
return None
#initialization doesn't matter as global optimality can be guaranteed?
if u_array is None:
u_init = [np.zeros(self.n_dims_) for i in range(self.T_-1)]
else:
u_init = u_array
x_init = np.concatenate([x0, np.zeros(self.n_dims_)])
res = self.ilqr_.ilqr_iterate(x_init, u_init, n_itrs=n_itrs, tol=tol, verbose=verbose)
return res['x_array_opt'][:, 0:self.n_dims_]
"""
Test case, 2D trajectory to track a sinusoid.
"""
import matplotlib.pyplot as plt
def PyLQR_TrajCtrl_TrackingTest():
n_pnts = 200
x_coord = np.linspace(0.0, 2*np.pi, n_pnts)
y_coord = np.sin(x_coord)
#concatenate to have trajectory
ref_traj = np.array([x_coord, y_coord]).T
weight_mats = [ np.eye(ref_traj.shape[1])*100 ]
#draw reference trajectory
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hold(True)
ax.plot(ref_traj[:, 0], ref_traj[:, 1], '.-k', linewidth=3.5)
ax.plot([ref_traj[0, 0]], [ref_traj[0, 1]], '*k', markersize=16)
lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
lqr_traj_ctrl.build_ilqr_tracking_solver(ref_traj, weight_mats)
n_queries = 5
for _ in range(n_queries):
#start from a perturbed point
x0 = ref_traj[0, :] + np.random.rand(2) * 2 - 1
syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0)
#plot it
ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)
plt.show()
return
def PyLQR_TrajCtrl_GeneralTest():
#build RBF basis
rbf_basis = np.array([
[-1.0, -1.0],
[-1.0, 1.0],
[1.0, -1.0],
[1.0, 1.0]
])
gamma = 1
T = 100
R = 1e-5
# rbf_funcs = [lambda x, u, t, aux: np.exp(-gamma*np.linalg.norm(x[0:2]-basis)**2) + .01*np.linalg.norm(u)**2 for basis in rbf_basis]
rbf_funcs = [
lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[0])**2) + R*np.linalg.norm(u)**2,
lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[1])**2) + R*np.linalg.norm(u)**2,
lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[2])**2) + R*np.linalg.norm(u)**2,
lambda x, u, t, aux: -np.exp(-gamma*np.linalg.norm(x[0:2]-rbf_basis[3])**2) + R*np.linalg.norm(u)**2
]
weights = np.array([.75, .5, .25, 1.])
weights = weights / (np.sum(weights) + 1e-6)
cost_func = lambda x, u, t, aux: np.sum(weights * np.array([basis_func(x, u, t, aux) for basis_func in rbf_funcs]))
lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
lqr_traj_ctrl.build_ilqr_general_solver(cost_func, n_dims=rbf_basis.shape[1], T=T)
n_eval_pnts = 50
coords = np.linspace(-2.5, 2.5, n_eval_pnts)
xv, yv = np.meshgrid(coords, coords)
z = [[cost_func(np.array([xv[i, j], yv[i, j]]), np.zeros(2), None, None) for j in range(yv.shape[1])] for i in range(len(xv))]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.contour(xv, yv, z)
n_queries = 5
u_array = np.random.rand(2, T-1).T * 2 - 1
for i in range(n_queries):
#start from a perturbed point
x0 = np.random.rand(2) * 4 - 2
syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0, u_array)
#plot it
ax.plot([x0[0]], [x0[1]], 'k*', markersize=12.0)
ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)
plt.show()
return
if __name__ == '__main__':
# PyLQR_TrajCtrl_TrackingTest()
PyLQR_TrajCtrl_GeneralTest() | gpl-3.0 |
chankeypathak/pandas-matplotlib-examples | Lesson 2/pandas_matplot_txt.py | 1 | 1307 | import pandas as pd
import matplotlib.pyplot as plt
from numpy import random
import os
# The inital set of baby names
names = ['Bob','Jessica','Mary','John','Mel']
# This will ensure the random samples below can be reproduced.
# This means the random samples will always be identical.
random.seed(500)
random_names = [names[random.randint(low=0,high=len(names))] for i in range(1000)]
# The number of births per name for the year 1880
births = [random.randint(low=0,high=1000) for i in range(1000)]
BabyDataSet = list(zip(random_names,births))
#print BabyDataSet[:10]
df = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births'])
df.to_csv('births1880.txt',index=False,header=False)
Location = 'births1880.txt'
df = pd.read_csv(Location, names=['Names','Births'])
#metadata
#print df.info()
#See first five records using head
#print df.head()
#See last five records using tail
#print df.tail()
os.remove(Location)
#df['Names'].unique()
#print(df['Names'].describe())
# Create a groupby object
name = df.groupby('Names')
# Apply the sum function to the groupby object
df = name.sum()
Sorted = df.sort_values(['Births'], ascending=False)
#print Sorted.head(1)
# Create graph
df['Births'].plot.bar()
print("The most popular name")
print(df.sort_values(by='Births', ascending=False).head(1))
plt.show()
| mit |
brchiu/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 17 | 5198 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
"""Trains, evaluates, and exports a multivariate model."""
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=[], num_features=5)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Larger window sizes generally produce a better covariance matrix.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
values = [current_state["observed"]]
times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
# Export the model so we can do iterative prediction and filtering without
# reloading model checkpoints.
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
with tf.Graph().as_default():
numpy.random.seed(1) # Make the example a bit more deterministic
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
for _ in range(100):
current_prediction = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=current_state, signatures=signatures,
session=session, steps=1))
next_sample = numpy.random.multivariate_normal(
# Squeeze out the batch and series length dimensions (both 1).
mean=numpy.squeeze(current_prediction["mean"], axis=(0, 1)),
cov=numpy.squeeze(current_prediction["covariance"], axis=(0, 1)))
# Update model state so that future predictions are conditional on the
# value we just sampled.
filtering_features = {
tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
tf.contrib.timeseries.FilteringResults.TIMES],
tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
None, None, :]}
current_state = (
tf.contrib.timeseries.saved_model_utils.filter_continuation(
continue_from=current_state,
session=session,
signatures=signatures,
features=filtering_features))
values.append(next_sample[None, None, :])
times.append(current_state["times"])
all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
return all_times, all_observations
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
all_times, all_observations = multivariate_train_and_sample()
# Show where sampling starts on the plot
pyplot.axvline(1000, linestyle="dotted")
pyplot.plot(all_times, all_observations)
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
sjshao09/KaggleRH | gunja_split.py | 1 | 22414 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import model_selection, preprocessing
import xgboost as xgb
import datetime
# ----------------- Settings ----------------- #
EN_CROSSVALIDATION = True
######################### Train for Investment Data ############################
DEFAULT_TRAIN_ROUNDS = 409
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
macro = pd.read_csv('input/macro.csv', parse_dates=['timestamp'])
# ----------------- Data Cleaning ----------------- #
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
df = df[df.life_sq<300]
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 5, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>4) & (test_df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
# ----------------- Train for Investment Data ----------------- #
df = df[df.product_type=="Investment"]
#df = df[df.price_doc>1000000]
df = df[df.price_doc/df.full_sq <= np.exp(13.05)]
#df = df[df.price_doc/df.full_sq >= np.exp(9)]
test_df.product_type = "Investment"
y_train = df["price_doc"] * 0.97
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
x_test = x_all[num_train:]
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# ----------------- Cross Validation ----------------- #
xgb_params = {
'eta': 0.03,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 1,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent': 1,
'seed': 0
}
if EN_CROSSVALIDATION:
print "[INFO] Cross Validation..."
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=20,
verbose_eval=20, show_stdv=True)
DEFAULT_TRAIN_ROUNDS = len(cv_output)
print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
evals=[(dtrain, 'train')], verbose_eval=50)
'''
# ----------------- Predicting Training Data for Ensemble ----------------- #
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df.loc[(df.full_sq<=1) & (df.life_sq<=1), 'full_sq'] = np.nan
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
df['product_type'] = "Investment"
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
y_train = df["price_doc"]
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
dtrain = xgb.DMatrix(x_train, y_train)
train_predict = model.predict(dtrain)
invest_train_predict_df = pd.DataFrame({'id': df.id, 'price_doc': train_predict})
# ----------------- Predicting Training Data for Ensemble -------end------- #
'''
y_predict = model.predict(dtest)
gunja_invest = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
print gunja_invest.head()
########################## Train for OwnerOccupier Data #########################
# ----------------- Settings ----------------- #
DEFAULT_TRAIN_ROUNDS = 704
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
macro = pd.read_csv('input/macro.csv', parse_dates=['timestamp'])
# ----------------- Data Cleaning ----------------- #
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df = df[(df.full_sq>1)|(df.life_sq>1)]
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
df = df[df.full_sq<400]
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
df = df[df.life_sq<300]
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# Test Set
test_df.loc[test_df.id==30938, 'full_sq'] = 37.8
test_df.loc[test_df.id==35857, 'full_sq'] = 42.07
test_df.loc[test_df.id==35108, 'full_sq'] = 40.3
test_df.loc[test_df.id==33648, 'num_room'] = 1
# Clean - Full Sq
test_df.loc[(test_df.full_sq<10) & (test_df.life_sq>1), 'full_sq'] = test_df.life_sq
# Clean - Life Sq
test_df.loc[test_df.life_sq>test_df.full_sq*2, 'life_sq'] = test_df.life_sq/10
test_df.loc[test_df.life_sq > test_df.full_sq, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < 5, 'life_sq'] = np.nan
test_df.loc[test_df.life_sq < test_df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
test_df.loc[test_df.kitch_sq < 2, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.full_sq * 0.5, 'kitch_sq'] = np.nan
test_df.loc[test_df.kitch_sq > test_df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
test_df.loc[test_df.build_year<1000, 'build_year'] = np.nan
test_df.loc[test_df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
test_df.loc[test_df.num_room<1, 'num_room'] = np.nan
test_df.loc[(test_df.num_room>4) & (test_df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
test_df.loc[test_df.floor==0, 'floor'] = np.nan
test_df.loc[test_df.max_floor==0, 'max_floor'] = np.nan
test_df.loc[(test_df.max_floor==1) & (test_df.floor>1), 'max_floor'] = np.nan
test_df.loc[test_df.max_floor>50, 'max_floor'] = np.nan
test_df.loc[test_df.floor>test_df.max_floor, 'floor'] = np.nan
# ----------------- New Features ----------------- #
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
# ----------------- Train for OwnerOccupier Data ----------------- #
df = df[df.product_type=="OwnerOccupier"]
df = df[df.price_doc/df.full_sq <= np.exp(13.15)]
df = df[df.price_doc/df.full_sq >= np.exp(10.4)]
test_df.product_type = "OwnerOccupier"
y_train = df["price_doc"]
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
x_test = x_all[num_train:]
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# ----------------- Cross Validation ----------------- #
xgb_params = {
'eta': 0.03,
'max_depth': 5,
'subsample': 0.7,
'colsample_bytree': 1,
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent': 1,
'seed': 0
}
if EN_CROSSVALIDATION:
print "[INFO] Cross Validation..."
cv_output = xgb.cv(xgb_params, dtrain, num_boost_round=1000, early_stopping_rounds=10,
verbose_eval=20, show_stdv=True)
DEFAULT_TRAIN_ROUNDS = len(cv_output)
print "[INFO] Optimal Training Rounds =", DEFAULT_TRAIN_ROUNDS
# ----------------- Training ----------------- #
print "[INFO] Training for", DEFAULT_TRAIN_ROUNDS, "rounds..."
model = xgb.train(xgb_params, dtrain, num_boost_round=DEFAULT_TRAIN_ROUNDS,
evals=[(dtrain, 'train')], verbose_eval=50)
'''
# ----------------- Predicting Training Data for Ensemble ----------------- #
#load files
df = pd.read_csv('input/train.csv', parse_dates=['timestamp'])
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
# Training Set
df.loc[df.id==13549, 'life_sq'] = 74
df.loc[df.id==10092, 'build_year'] = 2007
df.loc[df.id==10092, 'state'] = 3
df.loc[df.id==13120, 'build_year'] = 1970
df.loc[df.id==25943, 'max_floor'] = 17
# Clean - Full Sq
df.loc[(df.full_sq<=1) & (df.life_sq<=1), 'full_sq'] = np.nan
df.loc[(df.full_sq<10) & (df.life_sq>1), 'full_sq'] = df.life_sq
# Clean - Life Sq
df.loc[df.life_sq > df.full_sq*4, 'life_sq'] = df.life_sq/10
df.loc[df.life_sq > df.full_sq, 'life_sq'] = np.nan
df.loc[df.life_sq < 5, 'life_sq'] = np.nan
df.loc[df.life_sq < df.full_sq * 0.3, 'life_sq'] = np.nan
# Clean - Kitch Sq
df.loc[df.kitch_sq < 2, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.full_sq * 0.5, 'kitch_sq'] = np.nan
df.loc[df.kitch_sq > df.life_sq, 'kitch_sq'] = np.nan
# Clean - Build Year
df.loc[df.build_year<1000, 'build_year'] = np.nan
df.loc[df.build_year>2050, 'build_year'] = np.nan
# Clean - Num Room
df.loc[df.num_room<1, 'num_room'] = np.nan
df.loc[(df.num_room>4) & (df.full_sq<60), 'num_room'] = np.nan
# Clean - Floor and Max Floor
df.loc[df.floor==0, 'floor'] = np.nan
df.loc[df.max_floor==0, 'max_floor'] = np.nan
df.loc[(df.max_floor==1) & (df.floor>1), 'max_floor'] = np.nan
df.loc[df.max_floor>50, 'max_floor'] = np.nan
df.loc[df.floor>df.max_floor, 'floor'] = np.nan
# month_year_cnt
month_year = (df.timestamp.dt.month + df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
df['month_year_cnt'] = month_year.map(month_year_cnt_map)
month_year = (test_df.timestamp.dt.month + test_df.timestamp.dt.year * 100)
month_year_cnt_map = month_year.value_counts().to_dict()
test_df['month_year_cnt'] = month_year.map(month_year_cnt_map)
# week_year_cnt
week_year = (df.timestamp.dt.weekofyear + df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
df['week_year_cnt'] = week_year.map(week_year_cnt_map)
week_year = (test_df.timestamp.dt.weekofyear + test_df.timestamp.dt.year * 100)
week_year_cnt_map = week_year.value_counts().to_dict()
test_df['week_year_cnt'] = week_year.map(week_year_cnt_map)
# month
df['month'] = df.timestamp.dt.month
test_df['month'] = test_df.timestamp.dt.month
# day of week
df['dow'] = df.timestamp.dt.dayofweek
test_df['dow'] = test_df.timestamp.dt.dayofweek
# floor/max_floor
df['floor/max_floor'] = df['floor'] / df['max_floor'].astype(float)
test_df['floor/max_floor'] = test_df['floor'] / test_df['max_floor'].astype(float)
# kitch_sq/full_sq
df["kitch_sq/full_sq"] = df["kitch_sq"] / df["full_sq"].astype(float)
test_df["kitch_sq/full_sq"] = test_df["kitch_sq"] / test_df["full_sq"].astype(float)
# Avg Room Size
df['avg_room_size'] = df['life_sq'] / df['num_room'].astype(float)
test_df['avg_room_size'] = test_df['life_sq'] / test_df['num_room'].astype(float)
# Apartment Name
df['apartment_name'] = df['sub_area'] + df['metro_km_avto'].astype(str)
test_df['apartment_name'] = test_df['sub_area'] + test_df['metro_km_avto'].astype(str)
df.product_type = "OwnerOccupier"
x_train = df.drop(["id", "timestamp", "price_doc"], axis=1)
y_train = df["price_doc"]
x_test = test_df.drop(["id", "timestamp"], axis=1)
x_all = pd.concat([x_train, x_test])
# Feature Encoding
for c in x_all.columns:
if x_all[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
lbl.fit(list(x_all[c].values))
x_all[c] = lbl.transform(list(x_all[c].values))
# Separate Training and Test Data
num_train = len(x_train)
x_train = x_all[:num_train]
dtrain = xgb.DMatrix(x_train, y_train)
train_predict = model.predict(dtrain)
owner_train_predict_df = pd.DataFrame({'id': df.id, 'price_doc': train_predict})
# ----------------- Predicting Training Data for Ensemble -------end------- #
'''
y_predict = model.predict(dtest)
gunja_owner = pd.DataFrame({'id': test_df.id, 'price_doc': y_predict})
print gunja_owner.head()
############################## Merge #############################
'''
# For Training Data Set
df = pd.read_csv('input/train.csv')
df['price_doc'] = invest_train_predict_df['price_doc']
df.loc[df.product_type=="OwnerOccupier", 'price_doc'] = owner_train_predict_df['price_doc']
train_predict = df[["id", "price_doc"]]
train_predict.to_csv('gunja_train.csv', index=False)
'''
# For Test Data Set
test_df = pd.read_csv('input/test.csv', parse_dates=['timestamp'])
test_df['price_doc'] = gunja_invest['price_doc']
test_df.loc[test_df.product_type=="OwnerOccupier", 'price_doc'] = gunja_owner['price_doc']
gunja_output = test_df[["id", "price_doc"]]
print gunja_output.head()
gunja_output.to_csv('gunja_test.csv', index=False)
print "[INFO] Average Price =", gunja_output['price_doc'].mean()
| mit |
marcocaccin/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
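# Commented-out usage sketch (iterating the stream triggers the download on
# first run; the field names follow the docstring above):
#
#   for i, doc in enumerate(stream_reuters_documents()):
#       print(doc['title'], doc['topics'])
#       if i >= 5:
#           break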
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop: iterate over mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
rustychris/stompy | stompy/io/local/usgs_nwis.py | 1 | 9091 | import datetime
import os
import logging
import re
import six
from six.moves import cPickle
import numpy as np
import xarray as xr
import pandas as pd
import requests
log=logging.getLogger('usgs_nwis')
from ... import utils
from .. import rdb
from .common import periods
try:
import seawater
except ImportError:
seawater=None
def nwis_dataset_collection(stations,*a,**k):
"""
Fetch from multiple stations and glue the results together into a combined dataset.
The rest of the options are the same as for nwis_dataset().
Stations for which no data was found are omitted in the results.
"""
ds_per_site=[]
for station in stations:
ds=nwis_dataset(station,*a,**k)
if ds is None:
continue
ds['site']=('site',),[station]
ds_per_site.append(ds)
# And now glue those all together, but no filling of gaps yet.
# As cases of missing data come up, this will have to get smarter about padding
# individual sites.
if len(ds_per_site)==0:
# Annoying, but if no stations exist, just return None
return None
collection=xr.concat( ds_per_site, dim='site')
for ds in ds_per_site:
ds.close() # free up FDs
return collection
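# Hedged usage sketch (commented out; the station IDs are placeholders and
# cache_dir must already exist -- 60 becomes cb_00060, the discharge parameter):
#
#   ds = nwis_dataset_collection(["11455420", "11455350"],
#                                start_date=np.datetime64("2017-06-01"),
#                                end_date=np.datetime64("2017-07-01"),
#                                products=[60],
#                                days_per_request='M',
#                                cache_dir="cache")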
def nwis_dataset(station,start_date,end_date,products,
days_per_request='M',frequency='realtime',
cache_dir=None,clip=True,cache_only=False,
cache_no_data=False):
"""
Retrieval script for USGS waterdata.usgs.gov
Retrieve one or more data products from a single station.
station: string or numeric identifier for the USGS station (gage).
products: list of integers identifying the variable to retrieve. See
usgs_parm_codes.tsv in the directory above this directory.
start_date,end_date: period to retrieve, as python datetime, matplotlib datenum,
or numpy datetime64.
days_per_request: batch the requests to fetch smaller chunks at a time.
if this is an integer, then chunks will start with start_date, then start_date+days_per_request,
etc.
if this is a string, it is interpreted as the frequency argument to pandas.PeriodIndex.
so 'M' will request month-aligned chunks. this has the advantage that requests for different
start dates will still be aligned to integer periods, and can reuse cached data.
cache_dir: if specified, save each chunk as a netcdf file in this directory,
with filenames that include the gage, period and products. The directory must already
exist.
clip: if True, then even if more data was fetched, return only the period requested.
frequency: defaults to "realtime" which should correspond to the original
sample frequency. Alternatively, "daily" accesses daily average values.
cache_only: only read from cache, not attempting to fetch any new data.
cache_no_data: periods which successfully download but contain no data are recorded
as empty files. Otherwise it is assumed that there may be a transient error, and
nothing is written to cache. Do not use this for real-time retrievals, since it may
cache no-data results from the future.
returns an xarray dataset.
Note that names of variables are inferred from parameter codes where possible,
but this is not 100% accurate with respect to the descriptions provided in the rdb,
notably "Discharge, cubic feet per second" may be reported as
"stream_flow_mean_daily"
"""
start_date=utils.to_dt64(start_date)
end_date=utils.to_dt64(end_date)
params=dict(site_no=station,
format='rdb')
for prod in products:
params['cb_%05d'%prod]='on'
# Only for small requests of recent data:
# base_url="https://waterdata.usgs.gov/nwis/uv"
# Otherwise it redirects to here:
if frequency=='realtime':
base_url="https://nwis.waterdata.usgs.gov/usa/nwis/uv/"
elif frequency=='daily':
base_url="https://waterdata.usgs.gov/nwis/dv"
else:
raise Exception("Unknown frequency: %s"%(frequency))
params['period']=''
# generator for dicing up the request period
datasets=[]
last_url=None
for interval_start,interval_end in periods(start_date,end_date,days_per_request):
params['begin_date']=utils.to_datetime(interval_start).strftime('%Y-%m-%d')
params['end_date'] =utils.to_datetime(interval_end).strftime('%Y-%m-%d')
# This is the base name for caching, but also a shorthand for reporting
# issues with the user, since it already encapsulates most of the
# relevant info in a single tidy string.
base_fn="%s_%s_%s_%s.nc"%(station,
"-".join(["%d"%p for p in products]),
params['begin_date'],
params['end_date'])
if cache_dir is not None:
cache_fn=os.path.join(cache_dir,base_fn)
else:
cache_fn=None
if (cache_fn is not None) and os.path.exists(cache_fn):
log.info("Cached %s -- %s"%(interval_start,interval_end))
if os.path.getsize(cache_fn)==0:
# Cached no-data result
log.warning(" cache for %s -- %s says no-data"%(interval_start,interval_end))
continue
ds=xr.open_dataset(cache_fn)
elif cache_only:
log.info("Cache only - no data for %s -- %s"%(interval_start,interval_end))
continue
else:
log.info("Fetching %s"%(base_fn))
sesh = requests.Session()
sesh.mount('https://', requests.adapters.HTTPAdapter(max_retries=3))
req=sesh.get(base_url,params=params)
data=req.text
ds=rdb.rdb_to_dataset(text=data)
if ds is None: # no data for this period; cache_no_data controls whether that result is recorded below
log.warning(" %s: no data found for this period"%base_fn)
if (cache_fn is not None) and cache_no_data:
log.warning(" %s: making zero-byte cache file"%base_fn)
with open(cache_fn,'wb') as fp: pass
continue
ds.attrs['url']=req.url
if cache_fn is not None:
ds.to_netcdf(cache_fn)
# USGS returns data inclusive of the requested date range - leading to some overlap
if len(datasets):
ds=ds.isel(time=ds.time>datasets[-1].time[-1])
datasets.append(ds)
if len(datasets)==0:
# could try to construct zero-length dataset, but that sounds like a pain
# at the moment.
log.warning(" no data for station %s for any periods!"%station)
return None
if len(datasets)>1:
# it's possible that not all variables appear in all datasets
# dataset=xr.concat( datasets, dim='time')
dataset=datasets[0]
for other in datasets[1:]:
dataset=dataset.combine_first(other)
for stale in datasets:
stale.close() # maybe free up FDs?
else:
dataset=datasets[0]
if clip:
time_sel=(dataset.time.values>=start_date) & (dataset.time.values<end_date)
dataset=dataset.isel(time=time_sel)
dataset.load() # force read into memory before closing files
for d in datasets:
d.close()
for meta in ['datenum','tz_cd']:
if meta in dataset.data_vars:
dataset=dataset.set_coords(meta)
return dataset
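# Hedged single-station sketch (commented out; placeholder station ID, and the
# cache directory must already exist as noted in the docstring):
#
#   flow = nwis_dataset(station="11455420",
#                       start_date=np.datetime64("2017-06-01"),
#                       end_date=np.datetime64("2017-06-15"),
#                       products=[60],            # 00060: discharge, cfs
#                       frequency='realtime',
#                       cache_dir="cache")
#   if flow is not None:
#       print(flow.data_vars)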
def add_salinity(ds):
assert seawater is not None
for v in ds.data_vars:
if v.startswith('specific_conductance'):
salt_name=v.replace('specific_conductance','salinity')
if salt_name not in ds:
print("%s => %s"%(v,salt_name))
salt=seawater.eos80.salt(ds[v].values/1000. / seawater.constants.c3515,
25.0, # temperature - USGS adjusts to 25degC
0) # no pressure effects
ds[salt_name]=ds[v].dims, salt
def station_metadata(station,cache_dir=None):
if cache_dir is not None:
cache_fn=os.path.join(cache_dir,"meta-%s.pkl"%station)
if os.path.exists(cache_fn):
with open(cache_fn,'rb') as fp:
meta=cPickle.load(fp)
return meta
url="https://waterdata.usgs.gov/nwis/inventory?agency_code=USGS&site_no=%s"%station
resp=requests.get(url)
m=re.search(r"Latitude\s+([.0-9&#;']+\")",resp.text)
lat=m.group(1)
m=re.search(r"Longitude\s+([.0-9&#;']+\")",resp.text)
lon=m.group(1)
def dms_to_dd(s):
s=s.replace('°',' ').replace('"',' ').replace("'"," ").strip()
d,m,s =[float(p) for p in s.split()]
return d + m/60. + s/3600.
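# Worked example of the conversion (illustrative values):
#   dms_to_dd("38°02'46\"")  ->  38 + 2/60. + 46/3600. ~= 38.0461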
lat=dms_to_dd(lat)
# no mention of west longitude, but can assume it is west.
lon=-dms_to_dd(lon)
meta=dict(lat=lat,lon=lon)
if cache_dir is not None:
with open(cache_fn,'wb') as fp:
cPickle.dump(meta,fp)
return meta
| mit |
jreback/pandas | pandas/tests/arrays/boolean/test_construction.py | 6 | 12857 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import BooleanArray
from pandas.core.arrays.boolean import coerce_to_array
def test_boolean_array_constructor():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.tolist(), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, mask.tolist())
with pytest.raises(TypeError, match="values should be boolean numpy array"):
BooleanArray(values.astype(int), mask)
with pytest.raises(TypeError, match="mask should be boolean numpy array"):
BooleanArray(values, None)
with pytest.raises(ValueError, match="values must be a 1D array"):
BooleanArray(values.reshape(1, -1), mask)
with pytest.raises(ValueError, match="mask must be a 1D array"):
BooleanArray(values, mask.reshape(1, -1))
def test_boolean_array_constructor_copy():
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(values, mask)
assert result._data is values
assert result._mask is mask
result = BooleanArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
def test_to_boolean_array():
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, False])
)
result = pd.array([True, False, True], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True]), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
expected = BooleanArray(
np.array([True, False, True]), np.array([False, False, True])
)
result = pd.array([True, False, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_all_none():
expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True]))
result = pd.array([None, None, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
([True, False, None, np.nan, pd.NA], [True, False, None, None, None]),
([True, np.nan], [True, None]),
([True, pd.NA], [True, None]),
([np.nan, np.nan], [None, None]),
(np.array([np.nan, np.nan], dtype=float), [None, None]),
],
)
def test_to_boolean_array_missing_indicators(a, b):
result = pd.array(a, dtype="boolean")
expected = pd.array(b, dtype="boolean")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
["foo", "bar"],
["1", "2"],
# "foo",
[1, 2],
[1.0, 2.0],
pd.date_range("20130101", periods=2),
np.array(["foo"]),
np.array([1, 2]),
np.array([1.0, 2.0]),
[np.nan, {"a": 1}],
],
)
def test_to_boolean_array_error(values):
# error in converting existing arrays to BooleanArray
msg = "Need to pass bool-like value"
with pytest.raises(TypeError, match=msg):
pd.array(values, dtype="boolean")
def test_to_boolean_array_from_integer_array():
result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array(np.array([1, 0, 1, None]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_float_array():
result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_integer_like():
# integers of 0's and 1's
result = pd.array([1, 0, 1, 0], dtype="boolean")
expected = pd.array([True, False, True, False], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
# with missing values
result = pd.array([1, 0, 1, None], dtype="boolean")
expected = pd.array([True, False, True, None], dtype="boolean")
tm.assert_extension_array_equal(result, expected)
def test_coerce_to_array():
# TODO this is currently not public API
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is values
assert result._mask is mask
result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True))
expected = BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
assert result._data is not values
assert result._mask is not mask
# mixed missing from values and mask
values = [True, False, None, False]
mask = np.array([False, False, False, True], dtype="bool")
result = BooleanArray(*coerce_to_array(values, mask=mask))
expected = BooleanArray(
np.array([True, False, True, True]), np.array([False, False, True, True])
)
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask))
tm.assert_extension_array_equal(result, expected)
result = BooleanArray(*coerce_to_array(values, mask=mask.tolist()))
tm.assert_extension_array_equal(result, expected)
# raise errors for wrong dimension
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
with pytest.raises(ValueError, match="values must be a 1D list-like"):
coerce_to_array(values.reshape(1, -1))
with pytest.raises(ValueError, match="mask must be a 1D list-like"):
coerce_to_array(values, mask=mask.reshape(1, -1))
def test_coerce_to_array_from_boolean_array():
# passing BooleanArray to coerce_to_array
values = np.array([True, False, True, False], dtype="bool")
mask = np.array([False, False, False, True], dtype="bool")
arr = BooleanArray(values, mask)
result = BooleanArray(*coerce_to_array(arr))
tm.assert_extension_array_equal(result, arr)
# no copy
assert result._data is arr._data
assert result._mask is arr._mask
result = BooleanArray(*coerce_to_array(arr), copy=True)
tm.assert_extension_array_equal(result, arr)
assert result._data is not arr._data
assert result._mask is not arr._mask
with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"):
coerce_to_array(arr, mask=mask)
def test_coerce_to_numpy_array():
# with missing values -> object dtype
arr = pd.array([True, False, None], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# also with no missing values -> object dtype
arr = pd.array([True, False, True], dtype="boolean")
result = np.array(arr)
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
# force bool dtype
result = np.array(arr, dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
# with missing values will raise error
arr = pd.array([True, False, None], dtype="boolean")
msg = (
"cannot convert to 'bool'-dtype NumPy array with missing values. "
"Specify an appropriate 'na_value' for this dtype."
)
with pytest.raises(ValueError, match=msg):
np.array(arr, dtype="bool")
def test_to_boolean_array_from_strings():
result = BooleanArray._from_sequence_of_strings(
np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object)
)
expected = BooleanArray(
np.array([True, False, True, True, False, False, False]),
np.array([False, False, False, False, False, False, True]),
)
tm.assert_extension_array_equal(result, expected)
def test_to_boolean_array_from_strings_invalid_string():
with pytest.raises(ValueError, match="cannot be cast"):
BooleanArray._from_sequence_of_strings(["donkey"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
con = pd.Series if box else pd.array
# default (with or without missing values) -> object dtype
arr = con([True, False, True], dtype="boolean")
result = arr.to_numpy()
expected = np.array([True, False, True], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy()
expected = np.array([True, False, pd.NA], dtype="object")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy(dtype="str")
expected = np.array([True, False, pd.NA], dtype="<U5")
tm.assert_numpy_array_equal(result, expected)
# no missing values -> can convert to bool, otherwise raises
arr = con([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype="bool")
expected = np.array([True, False, True], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
arr = con([True, False, None], dtype="boolean")
with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"):
result = arr.to_numpy(dtype="bool")
# specify dtype and na_value
arr = con([True, False, None], dtype="boolean")
result = arr.to_numpy(dtype=object, na_value=None)
expected = np.array([True, False, None], dtype="object")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype=bool, na_value=False)
expected = np.array([True, False, False], dtype="bool")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype="int64", na_value=-99)
expected = np.array([1, 0, -99], dtype="int64")
tm.assert_numpy_array_equal(result, expected)
result = arr.to_numpy(dtype="float64", na_value=np.nan)
expected = np.array([1, 0, np.nan], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
# converting to int or float without specifying na_value raises
with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"):
arr.to_numpy(dtype="int64")
with pytest.raises(ValueError, match="cannot convert to 'float64'-dtype"):
arr.to_numpy(dtype="float64")
def test_to_numpy_copy():
# to_numpy can be zero-copy if no missing values
arr = pd.array([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype=bool)
result[0] = False
tm.assert_extension_array_equal(
arr, pd.array([False, False, True], dtype="boolean")
)
arr = pd.array([True, False, True], dtype="boolean")
result = arr.to_numpy(dtype=bool, copy=True)
result[0] = False
tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean"))
# FIXME: don't leave commented out
# TODO when BooleanArray coerces to object dtype numpy array, need to do conversion
# manually in the indexing code
# def test_indexing_boolean_mask():
# arr = pd.array([1, 2, 3, 4], dtype="Int64")
# mask = pd.array([True, False, True, False], dtype="boolean")
# result = arr[mask]
# expected = pd.array([1, 3], dtype="Int64")
# tm.assert_extension_array_equal(result, expected)
# # missing values -> error
# mask = pd.array([True, False, True, None], dtype="boolean")
# with pytest.raises(IndexError):
# result = arr[mask]
| bsd-3-clause |