repo_name (stringlengths 7–79) | path (stringlengths 4–179) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 959–798k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
toastedcornflakes/scikit-learn | sklearn/neighbors/regression.py | 31 | 10999 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to the :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
The distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect the :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
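# A minimal usage sketch (illustrative only, not part of the module): besides
# 'uniform' and 'distance', ``weights`` accepts any callable mapping an array
# of distances to an array of weights of the same shape. The epsilon below is
# an arbitrary choice to guard against zero distances.
#
#   >>> from sklearn.neighbors import KNeighborsRegressor
#   >>> inv_dist = lambda d: 1.0 / (d + 1e-9)        # user-defined weight function
#   >>> reg = KNeighborsRegressor(n_neighbors=2, weights=inv_dist)
#   >>> reg.fit([[0], [1], [2], [3]], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#   KNeighborsRegressor(...)
#   >>> reg.predict([[1.5]])   # both neighbors are 0.5 away, so they tie -> 0.5
#   array([ 0.5])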
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to the :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
The distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
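# Illustrative usage sketch (an editorial assumption about typical use, not
# library-provided): the radius must be large enough that every query point
# has at least one training neighbor, since this version does not special-case
# empty neighborhoods.
#
#   >>> from sklearn.neighbors import RadiusNeighborsRegressor
#   >>> reg = RadiusNeighborsRegressor(radius=1.0, weights='distance')
#   >>> reg.fit([[0], [1], [2], [3]], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#   RadiusNeighborsRegressor(...)
#   >>> reg.predict([[1.5]])   # neighbors at 1 and 2 carry equal weight -> 0.5
#   array([ 0.5])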
| bsd-3-clause |
arahuja/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 7 | 11398 |
"""
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1, make_regression
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
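# Editorial note: RFE ranks features by the magnitude of the fitted estimator's
# `coef_` (or `feature_importances_`). MockClassifier exposes a constant
# all-ones `coef_`, so every feature ties; the mock-based tests below therefore
# only check the bookkeeping (ranking length, transformed shape), not which
# features survive.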
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
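# Worked example (illustrative): n_features=11, n_features_to_select=3,
# step=2 gives formula1 = 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and
# formula2 = 1 + ceil((11 - 3) / 2.0) = 1 + 4 = 5, so the two agree.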
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
dsm054/pandas | asv_bench/benchmarks/timeseries.py | 3 | 11590 |
from datetime import timedelta
import numpy as np
from pandas import to_datetime, date_range, Series, DataFrame, period_range
from pandas.tseries.frequencies import infer_freq
try:
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
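# Editorial note: these classes follow the asv (airspeed velocity) benchmark
# convention -- `params`/`param_names` declare the parameter grid, `setup` runs
# before timing, and each `time_*` method is benchmarked for every parameter
# combination.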
class DatetimeIndex(object):
params = ['dst', 'repeated', 'tz_aware', 'tz_naive']
param_names = ['index_type']
def setup(self, index_type):
N = 100000
dtidxes = {'dst': date_range(start='10/29/2000 1:00:00',
end='10/29/2000 1:59:59', freq='S'),
'repeated': date_range(start='2000',
periods=N // 10,
freq='s').repeat(10),
'tz_aware': date_range(start='2000',
periods=N,
freq='s',
tz='US/Eastern'),
'tz_naive': date_range(start='2000',
periods=N,
freq='s')}
self.index = dtidxes[index_type]
def time_add_timedelta(self, index_type):
self.index + timedelta(minutes=2)
def time_normalize(self, index_type):
self.index.normalize()
def time_unique(self, index_type):
self.index.unique()
def time_to_time(self, index_type):
self.index.time
def time_get(self, index_type):
self.index[0]
def time_timeseries_is_month_start(self, index_type):
self.index.is_month_start
def time_to_date(self, index_type):
self.index.date
def time_to_pydatetime(self, index_type):
self.index.to_pydatetime()
class TzLocalize(object):
def setup(self):
dst_rng = date_range(start='10/29/2000 1:00:00',
end='10/29/2000 1:59:59', freq='S')
self.index = date_range(start='10/29/2000',
end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(dst_rng)
self.index = self.index.append(dst_rng)
self.index = self.index.append(date_range(start='10/29/2000 2:00:00',
end='10/29/2000 3:00:00',
freq='S'))
def time_infer_dst(self):
self.index.tz_localize('US/Eastern', ambiguous='infer')
class ResetIndex(object):
params = [None, 'US/Eastern']
param_names = 'tz'
def setup(self, tz):
idx = date_range(start='1/1/2000', periods=1000, freq='H', tz=tz)
self.df = DataFrame(np.random.randn(1000, 2), index=idx)
def time_reset_datetimeindex(self, tz):
self.df.reset_index()
class Factorize(object):
params = [None, 'Asia/Tokyo']
param_names = 'tz'
def setup(self, tz):
N = 100000
self.dti = date_range('2011-01-01', freq='H', periods=N, tz=tz)
self.dti = self.dti.repeat(5)
def time_factorize(self, tz):
self.dti.factorize()
class InferFreq(object):
params = [None, 'D', 'B']
param_names = ['freq']
def setup(self, freq):
if freq is None:
self.idx = date_range(start='1/1/1700', freq='D', periods=10000)
self.idx.freq = None
else:
self.idx = date_range(start='1/1/1700', freq=freq, periods=10000)
def time_infer_freq(self, freq):
infer_freq(self.idx)
class TimeDatetimeConverter(object):
def setup(self):
N = 100000
self.rng = date_range(start='1/1/2000', periods=N, freq='T')
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
class Iteration(object):
params = [date_range, period_range]
param_names = ['time_index']
def setup(self, time_index):
N = 10**6
self.idx = time_index(start='20140101', freq='T', periods=N)
self.exit = 10000
def time_iter(self, time_index):
for _ in self.idx:
pass
def time_iter_preexit(self, time_index):
for i, _ in enumerate(self.idx):
if i > self.exit:
break
class ResampleDataFrame(object):
params = ['max', 'mean', 'min']
param_names = ['method']
def setup(self, method):
rng = date_range(start='20130101', periods=100000, freq='50L')
df = DataFrame(np.random.randn(100000, 2), index=rng)
self.resample = getattr(df.resample('1s'), method)
def time_method(self, method):
self.resample()
class ResampleSeries(object):
params = (['period', 'datetime'], ['5min', '1D'], ['mean', 'ohlc'])
param_names = ['index', 'freq', 'method']
def setup(self, index, freq, method):
indexes = {'period': period_range(start='1/1/2000',
end='1/1/2001',
freq='T'),
'datetime': date_range(start='1/1/2000',
end='1/1/2001',
freq='T')}
idx = indexes[index]
ts = Series(np.random.randn(len(idx)), index=idx)
self.resample = getattr(ts.resample(freq), method)
def time_resample(self, index, freq, method):
self.resample()
class ResampleDatetime64(object):
# GH 7754
def setup(self):
rng3 = date_range(start='2000-01-01 00:00:00',
end='2000-01-01 10:00:00', freq='555000U')
self.dt_ts = Series(5, rng3, dtype='datetime64[ns]')
def time_resample(self):
self.dt_ts.resample('1S').last()
class AsOf(object):
params = ['DataFrame', 'Series']
param_names = ['constructor']
def setup(self, constructor):
N = 10000
M = 10
rng = date_range(start='1/1/1990', periods=N, freq='53s')
data = {'DataFrame': DataFrame(np.random.randn(N, M)),
'Series': Series(np.random.randn(N))}
self.ts = data[constructor]
self.ts.index = rng
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3.iloc[-5000:] = np.nan
self.dates = date_range(start='1/1/1990', periods=N * 10, freq='5s')
self.date = self.dates[0]
self.date_last = self.dates[-1]
self.date_early = self.date - timedelta(10)
# test speed of pre-computing NAs.
def time_asof(self, constructor):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self, constructor):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# without *while* loop
def time_asof_single(self, constructor):
self.ts.asof(self.date)
# test speed of the code path for a scalar index
# before the start. should be the same as above.
def time_asof_single_early(self, constructor):
self.ts.asof(self.date_early)
# test the speed of the code path for a scalar index
# with a long *while* loop. should still be much
# faster than pre-computing all the NAs.
def time_asof_nan_single(self, constructor):
self.ts3.asof(self.date_last)
class SortIndex(object):
params = [True, False]
param_names = ['monotonic']
def setup(self, monotonic):
N = 10**5
idx = date_range(start='1/1/2000', periods=N, freq='s')
self.s = Series(np.random.randn(N), index=idx)
if not monotonic:
self.s = self.s.sample(frac=1)
def time_sort_index(self, monotonic):
self.s.sort_index()
def time_get_slice(self, monotonic):
self.s[:10000]
class IrregularOps(object):
def setup(self):
N = 10**5
idx = date_range(start='1/1/2000', periods=N, freq='s')
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
def time_add(self):
self.left + self.right
class Lookup(object):
def setup(self):
N = 1500000
rng = date_range(start='1/1/2000', periods=N, freq='S')
self.ts = Series(1, index=rng)
self.lookup_val = rng[N // 2]
def time_lookup_and_cleanup(self):
self.ts[self.lookup_val]
self.ts.index._cleanup()
class ToDatetimeYYYYMMDD(object):
def setup(self):
rng = date_range(start='1/1/2000', periods=10000, freq='D')
self.stringsD = Series(rng.strftime('%Y%m%d'))
def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
class ToDatetimeISO8601(object):
def setup(self):
rng = date_range(start='1/1/2000', periods=20000, freq='H')
self.strings = rng.strftime('%Y-%m-%d %H:%M:%S').tolist()
self.strings_nosep = rng.strftime('%Y%m%d %H:%M:%S').tolist()
self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800'
for x in rng]
def time_iso8601(self):
to_datetime(self.strings)
def time_iso8601_nosep(self):
to_datetime(self.strings_nosep)
def time_iso8601_format(self):
to_datetime(self.strings, format='%Y-%m-%d %H:%M:%S')
def time_iso8601_format_no_sep(self):
to_datetime(self.strings_nosep, format='%Y%m%d %H:%M:%S')
def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
class ToDatetimeNONISO8601(object):
def setup(self):
N = 10000
half = int(N / 2)
ts_string_1 = 'March 1, 2018 12:00:00+0400'
ts_string_2 = 'March 1, 2018 12:00:00+0500'
self.same_offset = [ts_string_1] * N
self.diff_offset = [ts_string_1] * half + [ts_string_2] * half
def time_same_offset(self):
to_datetime(self.same_offset)
def time_different_offset(self):
to_datetime(self.diff_offset)
class ToDatetimeFormat(object):
def setup(self):
self.s = Series(['19MAY11', '19MAY11:00:00:00'] * 100000)
self.s2 = self.s.str.replace(':\\S+$', '')
def time_exact(self):
to_datetime(self.s2, format='%d%b%y')
def time_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
class ToDatetimeCache(object):
params = [True, False]
param_names = ['cache']
def setup(self, cache):
N = 10000
self.unique_numeric_seconds = list(range(N))
self.dup_numeric_seconds = [1000] * N
self.dup_string_dates = ['2000-02-11'] * N
self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * N
def time_unique_seconds_and_unit(self, cache):
to_datetime(self.unique_numeric_seconds, unit='s', cache=cache)
def time_dup_seconds_and_unit(self, cache):
to_datetime(self.dup_numeric_seconds, unit='s', cache=cache)
def time_dup_string_dates(self, cache):
to_datetime(self.dup_string_dates, cache=cache)
def time_dup_string_dates_and_format(self, cache):
to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=cache)
def time_dup_string_tzoffset_dates(self, cache):
to_datetime(self.dup_string_with_tz, cache=cache)
class DatetimeAccessor(object):
def setup(self):
N = 100000
self.series = Series(date_range(start='1/1/2000', periods=N, freq='T'))
def time_dt_accessor(self):
self.series.dt
def time_dt_accessor_normalize(self):
self.series.dt.normalize()
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
diegocavalca/Studies | phd-thesis/benchmarkings/Imaging-NILM-time-series/ConvNNWithPretrainedModel.py | 1 | 8720 |
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
from sklearn.externals import joblib
import sklearn
from sklearn import tree
import PIL
#READ LABELS
# from sklearn.cross_validation import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor, RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score, mean_absolute_error, \
confusion_matrix
def getFeatures(num_of_imgs):
model = VGG16(include_top=False, weights='imagenet', input_tensor=None, input_shape=(100, 100, 3), pooling='avg',
classes=1000)
# model.compile()
vgg16_feature_list = []
img_path = 'b1-16-17/fig-'
for i in range(0, num_of_imgs):
path = img_path + str(i) + '.png'
img = image.load_img(path, target_size=(100, 100))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
vgg16_feature = model.predict(x)
vgg16_feature_np = np.array(vgg16_feature)
vgg16_feature_list.append(vgg16_feature_np.flatten())
feature_array = np.array(vgg16_feature_list)
return feature_array
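# Editorial note: with include_top=False and pooling='avg', the standard VGG16
# backbone ends in a global-average-pooled 512-channel feature map, so each
# image should yield a 512-dimensional descriptor here (assuming the stock
# keras.applications VGG16 weights).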
def saveFeatures(filename, farray):
np.save(filename, farray)
def readFeatures(filename):
return np.load(filename)
def make_data_for_redd(flist,vgg16list):
label_List = []
for n in flist:
f = open(n).readlines()
for line in f:
label = line.split(' ')
labelarr = np.asarray(label).astype(np.float)
labelavg = np.average(labelarr)
if (labelavg > 0.00270):
labelavg = 1
else:
labelavg = 0
label_List.append(labelavg)
vgg16Arr= []
# vgg16Arr.extend(vgg16list)
for n in vgg16list:
tmp = readFeatures(n)
for i in range(0,tmp.__len__()):
vgg16Arr.append(tmp[i])
return (np.asarray(label_List), np.asarray(vgg16Arr))
def create_multilable_y(filenameA, filenameB, thressholdA, thressholdB):
fA = open(filenameA)
fB = open(filenameB)
new_Y= []
for l1, l2 in zip(fA, fB):
l1 = l1.split(' ')
l2 = l2.split(' ')
l1 = np.asarray(l1).astype(np.float)
l2 = np.asarray(l2).astype(np.float)
avg1 = np.average(l1)
avg2 = np.average(l2)
if (avg1 > thressholdA):
avg1 = 1
else: avg1 = 0
if (avg2 > thressholdB):
avg2 = 1
else: avg2 = 0
new_Y.append([avg1, avg2])
return np.asarray(new_Y)
def runTrainRedd(device):
# REDD READING
labelList, vgg16_feature_array = make_data_for_redd(['data/'+device+'1-b1-labels', 'data/'+device+'1-b2-labels',
'data/'+device+'1-b3-labels', 'data/'+device+'1-b4-labels',
'data/'+device+'1-b5-labels', 'data/'+device+'1-b6-labels'],
['numpy-files/vgg16-redd-b1.npy', 'numpy-files/vgg16-redd-b2.npy',
'numpy-files/vgg16-redd-b3.npy', 'numpy-files/vgg16-redd-b4.npy',
'numpy-files/vgg16-redd-b5.npy','numpy-files/vgg16-redd-b6.npy'])
num_of_imgs = labelList.__len__()
train_X, test_X, train_Y, test_Y = train_test_split(vgg16_feature_array[:num_of_imgs], labelList, test_size=0.30,
random_state=42)
# ##### Uncomment classifier of choice #####
# clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),n_estimators=500, learning_rate=0.25)
# clf = AdaBoostClassifier(n_estimators=1000, learning_rate=0.25)
# clf = AdaBoostClassifier(RandomForestClassifier(random_state=0.7), n_estimators=1000, learning_rate=0.5)
clf = DecisionTreeClassifier(max_depth=15)
# clf = RandomForestClassifier(n_estimators=1000, random_state=7)
# clf = MLPClassifier(hidden_layer_sizes=500, batch_size=20)
# Train classifier
clf.fit(train_X,train_Y)
# Save classifier for future use
joblib.dump(clf, 'Tree'+'-'+device+'-redd-all.joblib')
# Predict test data
pred = clf.predict(test_X)
# Print metrics
printmetrics(test_Y,pred)
return
def printmetrics(test, predicted):
##CLASSIFICATION METRICS
f1m = f1_score(test, predicted, average='macro')
f1 = f1_score(test, predicted)
acc = accuracy_score(test, predicted)
rec = recall_score(test, predicted)
prec = precision_score(test, predicted)
# print('f1:',f1)
# print('acc: ',acc)
# print('recall: ',rec)
# print('precision: ',prec)
# # to copy paste print
print("=== For docs: {:.4}\t{:.4}\t{:.4}\t{:.4}\t{:.4}".format(rec, prec, acc, f1m, f1))
# ##REGRESSION METRICS
# mae = mean_absolute_error(test_Y,pred)
# print('mae: ',mae)
# E_pred = sum(pred)
# E_ground = sum(test_Y)
# rete = abs(E_pred-E_ground)/float(max(E_ground,E_pred))
# print('relative error total energy: ',rete)
return
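# Editorial note on the commented-out regression metrics above: the relative
# total-energy error is rete = |E_pred - E_ground| / max(E_ground, E_pred);
# e.g. with E_pred = 90 and E_ground = 100 it evaluates to 10 / 100 = 0.1.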
def plot_predicted_and_ground_truth(test, predicted):
import matplotlib.pyplot as plt
plt.plot(predicted.flatten(), label = 'pred')
plt.plot(test.flatten(), label= 'Y')
plt.show()
return
def runTrainUkdale(device, house):
# Read data labels
h = house.split('-')
print(h[0])
if h[0] == '1':
f = open('data/'+device+h[1]+'-'+h[2]+'-labels').readlines()
else:
f = open('data/' + device + '1-1-labels').readlines()
# Read thresholds
thres = float(readThreshold(device, house))
labelList = []
for line in f:
label = line.split(' ')
labelarr = np.asarray(label).astype(np.float)
labelavg = np.average(labelarr)
if (labelavg > thres):
labelavg = 1
else:
labelavg = 0
labelList.append(labelavg)
labelList = np.asarray(labelList)
# UKDALE READING
print('completed reading labels')
num_of_imgs = labelList.__len__()
# Uncomment below to create your own VGG16 features:
# ---------------------------------------------------------
# vgg16_feature_array = getFeatures(num_of_imgs-1)
# saveFeatures('numpy-files/vgg16-b1-16-17.npy', vgg16_feature_array)
# print('save completed')
# ---------------------------------------------------------
vgg16_feature_array = readFeatures('/home/nick/PycharmProjects/nanaproj/numpy-files/vgg16-b1-16-17.npy')
# vgg16_feature_array = vgg16_feature_array[:labelList.__len__()]
train_X, test_X, train_Y, test_Y = train_test_split(vgg16_feature_array[:num_of_imgs], labelList[:num_of_imgs-1], test_size=0.99,
random_state=42)
# clf = AdaBoostRegressor(DecisionTreeRegressor(), n_estimators=5, learning_rate=0.5)
# clf = RandomForestRegressor(n_estimators=10, random_state=7)
# clf = MLPRegressor(hidden_layer_sizes=20, activation='tanh')
# clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),n_estimators=500, learning_rate=0.25)
# clf = AdaBoostClassifier(n_estimators=1000, learning_rate=0.25)
# clf = AdaBoostClassifier(RandomForestClassifier(random_state=0.7), n_estimators=1000, learning_rate=0.5)
# clf = DecisionTreeClassifier(max_depth=15)
# clf = RandomForestClassifier(n_estimators=1000, random_state=7)
# clf = MLPClassifier(hidden_layer_sizes=500, batch_size=20)
# cv = cross_val_score(model_tree, train_X, train_Y, cv=10)
# print("Accuracy: %0.2f (+/- %0.2f)" % (cv.mean(), cv.std() * 2))
#
# clf.fit(train_X,train_Y)
# joblib.dump(clf, 'MLP5-dishwasher-redd-all.joblib')
clf = joblib.load('/media/nick/Ext hard dr/NILM nana/models/AdaTree1000-washingmachine-13-14-b1.joblib')
pred = clf.predict(test_X)
# #
# # confMatrix = confusion_matrix(test_Y, pred)
# # print("confusion matrix: ", confMatrix)
#
# # metrics
printmetrics(test_Y, pred)
#
plot_predicted_and_ground_truth(test_Y, pred)
return
def readThreshold(device, house):
threshold = 0
f = open('thresholds-'+device+'.txt').readlines()
for line in f:
splittedline = line.split(',')
if splittedline[0] == house:
threshold = splittedline[1]
return threshold
runTrainUkdale('washing machine', '1-16-17')
| cc0-1.0 |
nhejazi/scikit-learn | sklearn/metrics/tests/test_common.py | 8 | 43668 |
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO: Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" argument: with
#   value True the metric returns the mean of the per-sample scores, and
#   with value False it returns their sum.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to systematically test some invariance
# properties, e.g. invariance to several input layouts.
#
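# For example (illustrative), a generic invariance test simply looks a metric
# up by name and calls it, whether it is a plain function or a partial:
#
#   metric = ALL_METRICS["weighted_f1_score"]   # partial(f1_score, average="weighted")
#   metric(y_true, y_pred)
#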
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
# Ensure that classification metrics give the same results with string
# labels as with numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
# TODO: those metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
invalids = [([0, 1], [np.inf, np.inf]),
([0, 1], [np.nan, np.nan]),
([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Classification metrics can't handle a mix "
"of binary and continuous targets",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred has at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
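# Illustrative sketch, not part of the original test module: a concrete view of
# the averaging relations checked by _check_averaging, using f1_score on a tiny
# multiclass problem (made-up data). Macro-averaging is the unweighted mean of
# the per-class scores; micro-averaging pools all decisions before scoring.
def _averaging_sketch():
    from sklearn.metrics import f1_score
    y_true = [0, 0, 1, 1, 2, 2]
    y_pred = [0, 1, 1, 1, 2, 0]
    per_class = f1_score(y_true, y_pred, average=None)
    macro = f1_score(y_true, y_pred, average="macro")
    micro = f1_score(y_true, y_pred, average="micro")
    assert_almost_equal(macro, np.mean(per_class))
    return per_class, macro, micro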
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (_named_check(check_averaging, name), name, y_true,
y_true_binarize, y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
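# Illustrative sketch, not part of the original test module: the core
# equivalence exercised above (integer sample weights behave like repeating the
# corresponding samples), shown with mean_squared_error on made-up data.
def _sample_weight_repeat_sketch():
    from sklearn.metrics import mean_squared_error
    y_true = np.array([0.0, 1.0, 2.0])
    y_pred = np.array([0.5, 1.5, 1.0])
    weights = np.array([1, 2, 3])
    weighted = mean_squared_error(y_true, y_pred, sample_weight=weights)
    repeated = mean_squared_error(np.repeat(y_true, weights),
                                  np.repeat(y_pred, weights))
    assert_almost_equal(weighted, repeated)
    return weighted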
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name not in REGRESSION_METRICS:
continue
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_score
else:
yield _named_check(check_sample_weight_invariance, name), name,\
metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_score)
else:
yield (_named_check(check_sample_weight_invariance, name), name,
metric, y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
|
bsd-3-clause
|
wanggang3333/scikit-learn
|
sklearn/cross_decomposition/cca_.py
|
209
|
3150
|
from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value (True) unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find the weights u, v that maximize
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
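# Illustrative sketch, not part of the original module: a minimal usage example
# mirroring the docstring above; the data are the same made-up values.
def _cca_usage_sketch():
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    cca = CCA(n_components=1).fit(X, Y)
    # the first pair of canonical variates is maximally correlated
    X_c, Y_c = cca.transform(X, Y)
    return X_c, Y_c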
|
bsd-3-clause
|
jplourenco/bokeh
|
bokeh/_legacy_charts/builder/tests/test_step_builder.py
|
6
|
2477
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh._legacy_charts import Step
from ._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 3, 7, 5, 26]
xyvalues['pypy'] = [12, 33, 47, 15, 126]
xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
        y_python = [2., 2., 3., 3., 7., 7., 5., 5., 26.]
        y_jython = [22., 22., 43., 43., 10., 10., 25., 25., 26.]
        y_pypy = [12., 12., 33., 33., 47., 47., 15., 15., 126.]
x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
|
bsd-3-clause
|
MG-RAST/kmerspectrumanalyzer
|
ksatools/graphit.py
|
1
|
7239
|
#!/usr/bin/env python
'''Tool to generate graphs of kmer spectra'''
COLORLIST = ["b", "g", "r", "c", "y",
"m", "k", "BlueViolet", "Coral", "Chartreuse",
"DarkGrey", "DeepPink", "LightPink"]
import sys
import numpy as np
import ksatools
import matplotlib as mpl
import argparse
def getcolor(index, colorlist):
if colorlist == []:
colorlist = COLORLIST
l = index % len(colorlist)
return colorlist[l]
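# Illustrative sketch, not part of the original script: getcolor simply cycles
# through the palette, so trace 0 and trace len(COLORLIST) share a color.
def _getcolor_sketch():
    first = getcolor(0, COLORLIST)
    wrapped = getcolor(len(COLORLIST), COLORLIST)
    assert first == wrapped == "b"
    return first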
def plotme(data, graphtype=None, label=None, n=0, opts=None, color=None, style="-", scale=1):
import matplotlib.pyplot as plt
# note, calccumsum will raise an exception here if data is invalid
    if color is None:
        color = getcolor(n, COLORLIST)
    if label == "":
        label = None
    s = scale  # x-axis scaling factor applied in the plotting calls below
    if graphtype == "linear" or graphtype is None:
# if opts.markers:
# pA = plt.plot(data[:, 0], data[:, 1], ".", color=color, label=label, linestyle=style)
pA = plt.plot(s * data[:, 0], data[:, 1],
color=color, label=label, linestyle=style)
legendloc = "upper right"
if graphtype == "semilogy":
if opts.dot:
pA = plt.semilogy(
s * data[:, 0], data[:, 1], ".", color=color, label=label, linestyle=style)
pA = plt.semilogy(s * data[:, 0], data[:, 1], color=color,
label=None, linestyle=style, linewidth=opts.thickness)
legendloc = "upper right"
if graphtype == "semilogx":
if opts.dot:
pA = plt.semilogx(data[:, 0], data[:, 1], ".",
color=color, label=label, linestyle=style)
pA = plt.semilogx(s * data[:, 0], data[:, 1], color=color,
label=label, linestyle=style, linewidth=opts.thickness)
legendloc = "upper right"
if graphtype == "loglog":
pA = plt.loglog(s * data[:, 0], data[:, 1], ".",
color=color, label=label, linestyle=style)
pA = plt.loglog(s * data[:, 0], data[:, 1], color=color,
label=None, linestyle=style, linewidth=opts.thickness)
legendloc = "upper right"
if graphtype == "diff":
pA = plt.plot(data[1:, 0], np.exp(np.diff(np.log(data[:, 1]))) /
data[1:, 0], ".", color=color, label=label, linestyle=style)
pA = plt.plot(data[1:, 0], np.exp(np.diff(
            np.log(data[:, 1]))) / data[1:, 0], color=color, label=None, linestyle=style)
legendloc = "upper right"
if not opts.suppress:
plt.legend()
if opts.plotlegend is not None:
plt.gcf().suptitle(opts.plotlegend, fontsize=24, x=0.03)
if opts.xlim != "":
x1, x2 = opts.xlim.split(",")
plt.xlim([float(x1), float(x2)])
plt.xlabel(opts.xlabel, fontsize=18)
plt.ylabel(opts.ylabel, fontsize=18)
plt.grid(1)
if __name__ == '__main__':
usage = "graphit.py <options> <arguments>"
parser = argparse.ArgumentParser(usage)
parser.add_argument("files", nargs='*', type="str")
parser.add_argument("-x", "--xlabel", dest="xlabel", action="store",
default="x label", help="")
parser.add_argument("-y", "--ylabel", dest="ylabel", action="store",
default="y label", help="")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="verbose")
parser.add_argument("-o", "--outfile", dest="outfile", action="store",
default="test.png", help="dump table with outputs ")
parser.add_argument("-g", "--graphtype", dest="graphtype", action="store",
default=None, help="graph type")
parser.add_argument("-i", "--interactive", dest="interactive", action="store_true",
default=False, help="interactive mode--draw window")
parser.add_argument("-l", "--list", dest="filelist",
default=None, help="file containing list of targets and labels")
parser.add_argument("-t", "--thickness", dest="thickness",
default=2, help="line thickness for traces")
parser.add_argument("-w", "--writetype", dest="writetype",
default="pdf", help="file type for output (pdf,png)")
parser.add_argument("-p", "--plotlegend", dest="plotlegend",
default=None, help="Overall number at top of graph")
parser.add_argument("-s", "--suppresslegend", dest="suppress", action="store_true",
default=False, help="supress display of legend")
parser.add_argument("-n", "--name", dest="title",
default=None, help="Name for graph, graph title")
parser.add_argument("-c", "--scale", dest="scale",
default=False, action="store_true", help="Multiply by col 2")
parser.add_argument("--xlim", dest="xlim",
default="", type="str", help="xlimits: comma-separated")
parser.add_argument("--ylim", dest="ylim",
default="", type="str", help="ylimits: comma-separated")
parser.add_argument("-d", "--dot", dest="dot",
default=False, action="store_true", help="plot dots")
OPTS = parser.parse_args()
SCALE = OPTS.scale
if not OPTS.interactive:
mpl.use("Agg")
else:
mpl.use('TkAgg')
import matplotlib.pyplot as plt
listfile = OPTS.filelist
IN_FILE = open(listfile, "r").readlines()
numplots = len(IN_FILE)
n = 0
for line in IN_FILE:
if line[0] != "#":
a = line.strip().split("\t")
if len(a[0]) > 0:
if len(a) == 1:
a.append(a[0])
sys.stderr.write("%s %s \n" % (a[0], a[1]))
filename = a[0]
if len(a) == 3 or len(a) == 4:
selectedcolor = a[2]
else:
selectedcolor = getcolor(n, COLORLIST)
spectrum = ksatools.loadfile(filename)
if SCALE:
s = float(a[1])
else:
s = 1
if len(a) == 4:
selectedcolor = a[2]
selectedstyle = a[3]
plotme(spectrum, label=a[1], color=selectedcolor, scale=s,
style=selectedstyle, graphtype=OPTS.graphtype, opts=OPTS)
else:
plotme(spectrum, label=a[1], color=selectedcolor, scale=s,
graphtype=OPTS.graphtype, opts=OPTS)
n = n + 1
if OPTS.suppress == 0:
plt.legend(loc="upper left")
else:
for v in OPTS.files:
print(v)
filename = v
spectrum = ksatools.loadfile(filename)
plotme(spectrum, filename, opts=OPTS,
color=COLORLIST[n], graphtype=OPTS.graphtype)
n = n + 1
# plt.legend(loc="upper left")
if OPTS.interactive:
plt.show()
if OPTS.outfile == "test.png":
sys.stderr.write("Warning! printing graphs in test.png!\n")
else:
sys.stderr.write("Printing graphs in " + OPTS.outfile + "\n")
plt.savefig(OPTS.outfile)
|
bsd-2-clause
|
pmorissette/ffn
|
setup.py
|
1
|
1415
|
import os
import re
import setuptools
with open(os.path.join(os.path.dirname(__file__), "ffn", "__init__.py"), "r") as fp:
version = re.search(
"^__version__ = \\((\\d+), (\\d+), (\\d+)\\)$", fp.read(), re.MULTILINE
).groups()
with open(os.path.join(os.path.dirname(__file__), "README.rst"), "r") as fp:
description = fp.read().replace("\r\n", "\n")
setuptools.setup(
name="ffn",
version=".".join(version),
author="Philippe Morissette",
author_email="[email protected]",
description="Financial functions for Python",
keywords="python finance quant functions",
url="https://github.com/pmorissette/ffn",
license="MIT",
install_requires=[
"decorator>=4",
"future>=0.15",
"matplotlib>=1",
"numpy>=1.5",
"pandas>=0.19",
"pandas-datareader>=0.2",
"scikit-learn>=0.15",
"scipy>=0.15",
"tabulate>=0.7.5",
],
extras_require={
"dev": [
"black>=20.8b1",
"codecov",
"coverage",
"flake8",
"flake8-black",
"future",
"mock",
"nose",
],
},
packages=["ffn"],
long_description=description,
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python",
],
)
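# Illustrative sketch, not part of the original setup script: the regular
# expression above pulls the three numeric components out of a line such as
# "__version__ = (0, 3, 6)" (a made-up version) and ".".join() turns them into
# a dotted version string.
def _version_regex_sketch():
    sample = "__version__ = (0, 3, 6)\n"
    groups = re.search(
        "^__version__ = \\((\\d+), (\\d+), (\\d+)\\)$", sample, re.MULTILINE
    ).groups()
    assert ".".join(groups) == "0.3.6"
    return groups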
|
mit
|
gkunter/coquery
|
coquery/visualizer/colorizer.py
|
1
|
5350
|
# -*- coding: utf-8 -*-
"""
colorizer.py is part of Coquery.
Copyright (c) 2018 Gero Kunter ([email protected])
Coquery is released under the terms of the GNU General Public License (v3).
For details, see the file LICENSE that you should have received along
with Coquery. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import unicode_literals
import pandas as pd
import seaborn as sns
from coquery.gui.pyqt_compat import QtCore, QtGui
from coquery.defines import PALETTE_BW
from coquery.general import pretty
from coquery import options
COQ_SINGLE = "COQSINGLE"
COQ_CUSTOM = "COQCUSTOM"
class Colorizer(QtCore.QObject):
def __init__(self, palette, ncol, values=None):
super(Colorizer, self).__init__()
self.palette = palette
self.ncol = ncol
self.values = values
self._title_frm = ""
self._entry_frm = "{val}"
self._reversed = False
def get_palette(self, n=None):
base, _, rev = self.palette.partition("_")
if base == PALETTE_BW:
col = ([(0, 0, 0), (1, 1, 1)] * (1 + self.ncol // 2))[:self.ncol]
elif base == COQ_SINGLE:
color = QtGui.QColor(rev)
col = [tuple(x / 255 for x in color.getRgb()[:-1])] * self.ncol
else:
col = sns.color_palette(base, self.ncol)
if rev:
col = col[::-1]
if n:
col = (col * (1 + n // len(col)))[:n]
return col
def set_reversed(self, rev):
self._reversed = rev
def get_hues(self, data):
base, _, rev = self.palette.partition("_")
n = len(data)
pal = self.get_palette()
if self._reversed:
pal = pal[::-1]
return (pal * ((n // self.ncol) + 1))[:n]
@staticmethod
def hex_to_rgb(l):
return [(int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16))
for s in l]
@staticmethod
def rgb_to_hex(l):
return ["#{:02x}{:02x}{:02x}".format(int(r), int(g), int(b))
for r, g, b in l]
@staticmethod
def rgb_to_mpt(l):
return [(r / 255, g / 255, b / 255) for r, g, b in l]
@staticmethod
def mpt_to_rgb(l):
return [(int(r * 255), int(g * 255), int(b * 255)) for r, g, b in l]
@staticmethod
def hex_to_mpt(l):
return Colorizer.rgb_to_mpt(Colorizer.hex_to_rgb(l))
@staticmethod
def mpt_to_hex(l):
return Colorizer.rgb_to_hex(Colorizer.mpt_to_rgb(l))
def legend_title(self, z):
return self._title_frm.format(z=z)
def legend_palette(self):
return []
def legend_levels(self):
return []
def set_title_frm(self, s):
self._title_frm = s
def set_entry_frm(self, s):
self._entry_frm = s
class ColorizeByFactor(Colorizer):
def __init__(self, palette, ncol, values):
super(ColorizeByFactor, self).__init__(palette, ncol, values)
self.set_title_frm("{z}")
def get_hues(self, data):
pal = self.get_palette()
color_indices = [self.values.index(val) % len(pal) for val in data]
hues = [pal[ix] for ix in color_indices]
return hues
def legend_palette(self):
n = len(self.values)
pal = self.get_palette()
return (pal * ((n // len(pal)) + 1))[:n]
def legend_levels(self):
return [self._entry_frm.format(val=x) for x in self.values]
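# Illustrative sketch, not part of the original module: ColorizeByFactor maps
# each factor level to a fixed palette entry, so equal values always receive
# the same hue. "Paired" is just one example of a valid seaborn palette name.
def _colorize_by_factor_sketch():
    colorizer = ColorizeByFactor("Paired", ncol=3, values=["a", "b", "c"])
    hues = colorizer.get_hues(["a", "c", "a", "b"])
    assert hues[0] == hues[2]  # both entries are "a", hence the same color
    return hues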
class ColorizeByNum(Colorizer):
def __init__(self, palette, ncol, values, vrange=None):
super(ColorizeByNum, self).__init__(palette, ncol, values)
if not vrange:
vmin, vmax = values.min(), values.max()
else:
vmin, vmax = vrange
self.dtype = values.dtype
self._direct_mapping = (len(values) <= ncol and
len(values) == len(values.unique()))
if self._direct_mapping:
# Use direct mapping if there are not more categories than
# colors
self.bins = sorted(values)
self.set_entry_frm("{val}")
else:
self.bins = pretty((vmin, vmax), ncol)
self.set_entry_frm("≥ {val}")
self.set_title_frm("{z}")
def set_entry_frm(self, s):
self._entry_frm = s
def get_hues(self, data):
pal = self.get_palette(n=self.ncol)
if not self._direct_mapping:
pal = pal[::-1]
binned = pd.np.digitize(data, self.bins, right=False) - 1
return [pal[val] for val in binned]
def legend_palette(self):
return self.get_palette()
def legend_levels(self):
if self.dtype == int:
frm_str = "{:.0f}"
else:
frm_str = options.cfg.float_format
lst = [self._entry_frm.format(val=frm_str.format(x))
for x in self.bins]
if len(self.values) <= self.ncol:
return lst
else:
return lst[::-1]
class ColorizeByFreq(Colorizer):
def get_hues(self, data):
self.bins = pd.np.linspace(data.min(), data.max(),
self.ncol,
endpoint=False)
pal = self.get_palette(n=self.ncol)
binned = pd.np.digitize(data, self.bins, right=False) - 1
return [pal[val] for val in binned]
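# Illustrative sketch, not part of the original module: the binning behind
# ColorizeByFreq, reduced to the same pd.np calls the class uses. Values are
# cut into ncol equal-width bins over their range and each bin index selects
# one palette color.
def _frequency_binning_sketch():
    data = pd.Series([1.0, 2.0, 4.0, 8.0])
    ncol = 2
    bins = pd.np.linspace(data.min(), data.max(), ncol, endpoint=False)
    binned = pd.np.digitize(data, bins, right=False) - 1
    # bins are [1.0, 4.5]: 1.0, 2.0 and 4.0 land in bin 0, 8.0 in bin 1
    return binned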
|
gpl-3.0
|
rrohan/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
218
|
3893
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Invert the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
czbiohub/singlecell-dash
|
tissue_analysis.py
|
1
|
12940
|
import argparse
import os
from collections import Counter, OrderedDict
import fastcluster
import matplotlib.colors
import matplotlib.figure
import networkx
import numpy as np
import pandas as pd
import scipy.cluster.hierarchy
import scipy.stats as stats
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.legend_handler import HandlerPatch
from matplotlib.patches import Circle
from networkx.drawing.nx_agraph import graphviz_layout
from sklearn.neighbors import NearestNeighbors
import singlecell_dash.common as common
class HandlerCircle(HandlerPatch):
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize, trans):
center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
p = Circle(xy=center, radius=width / 4.0, alpha=0.4)
self.update_prop(p, orig_handle, legend)
p.set_transform(trans)
return [p]
def plot_labelprop_mpl(coords, communities, file_name=None, title=''):
u_community = np.unique(communities)
cmap = matplotlib.cm.tab20
cmap.set_over('black')
ix = np.random.permutation(np.arange(coords.shape[0], dtype=int))
x = coords[ix, 0]
y = coords[ix, 1]
fig = matplotlib.figure.Figure(figsize=(12, 12))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
ax.scatter(x, y, s=60, alpha=0.8, linewidth=0,
color=cmap(communities[ix]))
ax.tick_params(left='off', labelleft='off', bottom='off', labelbottom='off')
ax.set_title(title)
lbl_rects = [(Circle((0, 0), 1, color=cmap(c)), c) for c in u_community]
fig.legend(*zip(*lbl_rects), **{'handler_map': {Circle: HandlerCircle()},
'loc': 7, 'fontsize': 'large'})
if file_name:
FigureCanvasAgg(fig).print_figure(file_name)
def label_propagation(G, verbose=False):
node_labels = {node: i for i, node in enumerate(G.nodes())}
n_changes = 1
while n_changes:
n_changes = 0
for node in G.nodes():
neighbor_labels = Counter([node_labels[n] for n in G.neighbors(node)])
pop_label = neighbor_labels.most_common(1)[0][0]
if node_labels[node] != pop_label:
node_labels[node] = pop_label
n_changes += 1
if verbose:
print("Round with ", n_changes, " to labels.")
label_renames = {label: i for i, (label, c)
in enumerate(Counter(node_labels.values()).most_common())}
for node in node_labels:
node_labels[node] = label_renames[node_labels[node]]
if verbose:
print("Most common labels, in the form label, frequency")
print(Counter(node_labels.values()).most_common())
return node_labels
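# Illustrative sketch, not part of the original script: label propagation on a
# tiny hand-built graph. Two triangles joined by a single bridge edge typically
# settle on one label per triangle; the exact label values depend on the node
# iteration order.
def _label_propagation_sketch():
    G = networkx.Graph()
    G.add_edges_from([(0, 1), (1, 2), (0, 2),   # first triangle
                      (3, 4), (4, 5), (3, 5),   # second triangle
                      (2, 3)])                  # bridge between them
    return label_propagation(G)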
def network_layout(matrix, k=30):
nbrs = NearestNeighbors(k, algorithm='brute',
metric='cosine').fit(matrix)
G = networkx.from_scipy_sparse_matrix(nbrs.kneighbors_graph(matrix))
node_labels = label_propagation(G, verbose=True)
communities_labelprop = np.array([node_labels[i] for i in range(matrix.shape[0])])
pos = graphviz_layout(G, prog="sfdp")
coords = np.array([pos[i] for i in range(len(pos))])
print(coords.shape)
return coords, communities_labelprop
def expression(matrix, group):
g = matrix[group,:].tocsc()
mu = np.asarray(g.mean(0)).flatten()
std = np.sqrt(np.asarray((g.power(2)).mean(0)).flatten() - mu ** 2)
percent_nz = 100*np.asarray((g > 0).mean(0)).flatten()
return mu, std, percent_nz
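# Illustrative sketch, not part of the original script: what expression()
# computes, on a tiny made-up count matrix. scipy.sparse is imported locally
# because the module only imports the scipy submodules it needs elsewhere.
def _expression_sketch():
    from scipy.sparse import csr_matrix
    counts = csr_matrix(np.array([[0, 2, 4],
                                  [0, 0, 6],
                                  [1, 2, 0]]))
    group = np.array([True, True, False])  # restrict to the first two cells
    mu, std, percent_nz = expression(counts, group)
    # mu: per-gene mean UMI, std: per-gene standard deviation,
    # percent_nz: percentage of selected cells with a non-zero count
    return mu, std, percent_nz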
def cluster_expression(tenx, clusters, skip=1):
df = pd.DataFrame(index=tenx.genes.columns)
for c in np.unique(clusters):
mu, std, percent_nz = expression(tenx.genes[::skip,:], clusters == c)
df[f'Cluster {c} mean UMI'] = mu
df[f'Cluster {c} std UMI'] = std
df[f'Cluster {c} % present'] = percent_nz
return df
def load_tissue(tissue, data_folder, channels_to_use = None):
genes_to_drop = 'Malat1|Rn45s|Rpl10|Rpl10a|Rpl10l|Rpl11|Rpl12|Rpl13|Rpl13a|Rpl14|Rpl15|Rpl17|Rpl18|Rpl18a|Rpl19|Rpl21|Rpl22|Rpl22l1|Rpl23|Rpl23a|Rpl24|Rpl26|Rpl27|Rpl27a|Rpl28|Rpl29|Rpl3|Rpl30|Rpl31|Rpl31-ps12|Rpl32|Rpl34|Rpl34-ps1|Rpl35|Rpl35a|Rpl36|Rpl36a|Rpl36al|Rpl37|Rpl37a|Rpl38|Rpl39|Rpl39l|Rpl3l|Rpl4|Rpl41|Rpl5|Rpl6|Rpl7|Rpl7a|Rpl7l1|Rpl8|Rpl9|Rplp0|Rplp1|Rplp2|Rplp2-ps1|Rps10|Rps11|Rps12|Rps13|Rps14|Rps15|Rps15a|Rps15a-ps4|Rps15a-ps6|Rps16|Rps17|Rps18|Rps19|Rps19-ps3|Rps19bp1|Rps2|Rps20|Rps21|Rps23|Rps24|Rps25|Rps26|Rps27|Rps27a|Rps27l|Rps28|Rps29|Rps3|Rps3a|Rps4x|Rps4y2|Rps5|Rps6|Rps6ka1|Rps6ka2|Rps6ka3|Rps6ka4|Rps6ka5|Rps6ka6|Rps6kb1|Rps6kb2|Rps6kc1|Rps6kl1|Rps7|Rps8|Rps9|Rpsa'.split(
'|')
tenx = common.TenX_Runs(data_folder, tissue=tissue, verbose=True, genes_to_drop=genes_to_drop,
channels_to_use=channels_to_use)
return tenx
def cluster(tenx, skip, file_format, k, tissue):
coords, communities_labelprop = network_layout(tenx.genes.matrix[::skip], k=k)
coords_df = pd.DataFrame({'0': coords[:, 0], '1': coords[:, 1], 'cluster': communities_labelprop},
index=tenx.genes.rows[::skip])
file_name = file_format.format('smushed', 'csv')
coords_df.to_csv(file_name)
file_name = file_format.format('embedding','png')
plot_labelprop_mpl(coords, communities_labelprop, title=f'{tissue}: Graph layout of clusters',
file_name=file_name)
return communities_labelprop
def diff_exp_clusters(cluster_expression_df, cluster_sizes, file_format):
n_clusters = len(cluster_sizes)
cluster_sum_umi = np.vstack(
[cluster_sizes[c] * cluster_expression_df[f'Cluster {c} mean UMI'].values
for c in range(n_clusters)]
)
cluster_ssq_umi = np.vstack(
[cluster_sizes[c] * (cluster_expression_df[f'Cluster {c} std UMI'].values ** 2
+ cluster_expression_df[f'Cluster {c} mean UMI'].values ** 2)
for c in range(n_clusters)]
)
Z = fastcluster.linkage(cluster_sum_umi, method='average', metric='cosine')
fig = matplotlib.figure.Figure(figsize=(12, 12))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
scipy.cluster.hierarchy.dendrogram(Z, ax=ax,
color_threshold=0, above_threshold_color='grey')
ax.set_title('Hierarchical structure of cell-type clusters')
ax.set_xlabel('Cluster Label')
ax.tick_params(labelleft='off')
FigureCanvasAgg(fig).print_figure(file_format.format('dendrogram', 'png'))
root, rd = scipy.cluster.hierarchy.to_tree(Z, rd=True)
def de(lbl_1, lbl_2, group1, group2):
print(f'Comparing {group1} to {group2}')
group1_n_cells = sum(cluster_sizes[c] for c in group1)
group2_n_cells = sum(cluster_sizes[c] for c in group2)
group1_mean = cluster_sum_umi[group1, :].sum(axis=0) / group1_n_cells
group2_mean = cluster_sum_umi[group2, :].sum(axis=0) / group2_n_cells
mean_diff = group1_mean - group2_mean
group1_var = (cluster_ssq_umi[group1, :].sum(axis=0)
/ group1_n_cells - group1_mean ** 2)
group2_var = (cluster_ssq_umi[group2, :].sum(axis=0)
/ group2_n_cells - group2_mean ** 2)
pooled_sd = np.sqrt(group1_var / group1_n_cells
+ group2_var / group2_n_cells)
z_scores = np.zeros_like(pooled_sd)
nz = pooled_sd > 0
z_scores[nz] = np.nan_to_num(mean_diff[nz] / pooled_sd[nz])
        # two-sided z-test p-values, Bonferroni-corrected across genes
p_vals = np.clip((1 - stats.norm.cdf(np.abs(z_scores)))
* 2 * z_scores.shape[0], 0, 1)
df = pd.DataFrame(OrderedDict([('z', z_scores),
('p', p_vals),
('group1', group1_mean),
('group2', group2_mean)]),
index=cluster_expression_df.index)
df = df[df['p'] < 0.001]
df['diff'] = df['group1'] - df['group2']
df.sort_values('diff', ascending=False, inplace=True)
name = f'differential_gene_expression_{lbl_1}_v_{lbl_2}'
df.to_csv(file_format.format(name, 'csv'))
for i in range(0, 2 * n_clusters - 1):
if i >= n_clusters:
left_child = rd[i].get_left()
left_clusters = (left_child.pre_order(lambda x: x.id))
right_child = rd[i].get_right()
right_clusters = (right_child.pre_order(lambda x: x.id))
# don't calculate if it's redundant with a 1-vs-all comp
if i == 2 * n_clusters - 2 and (len(left_clusters) == 1
or len(right_clusters) == 1):
continue
de(left_child.id, right_child.id, left_clusters, right_clusters)
if i < 2 * n_clusters - 2:
below = rd[i].pre_order(lambda x: x.id)
above = [j for j in range(len(cluster_sizes)) if j not in below]
# don't calculate redundant comparison
if len(above) == 1:
continue
de(i, 'all', below, above)
group_list = [(i, rd[i].pre_order(lambda x: x.id))
for i in range(0, 2 * n_clusters - 1)]
group_list[-1] = ('total', group_list[-1][1])
return group_list
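# Illustrative sketch, not part of the original script: the z-score used inside
# de() above, computed on made-up summary statistics for a single gene. The
# pooled standard error combines the per-group variances scaled by group size,
# and the p-value comes from the same normal approximation used in de().
def _pooled_z_sketch():
    group1_mean, group1_var, group1_n = 2.0, 1.0, 100
    group2_mean, group2_var, group2_n = 1.5, 1.5, 200
    pooled_sd = np.sqrt(group1_var / group1_n + group2_var / group2_n)
    z = (group1_mean - group2_mean) / pooled_sd
    p = (1 - stats.norm.cdf(abs(z))) * 2
    return z, p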
if __name__ == '__main__':
all_tissues = ['Tongue', 'Liver', 'Bladder', 'Kidney', 'Spleen', 'Marrow',
'Lung', 'Muscle', 'Heart', 'Thymus', 'Mammary']
channels_to_use = ['10X_P4_0', '10X_P4_1', '10X_P4_2', '10X_P4_3',
'10X_P4_4', '10X_P4_5', '10X_P4_6', '10X_P4_7',
'10X_P6_0', '10X_P6_1', '10X_P6_2', '10X_P6_3',
'10X_P6_4', '10X_P6_5', '10X_P6_6', '10X_P6_7',
'10X_P7_0', '10X_P7_1', '10X_P7_2', '10X_P7_3',
'10X_P7_4', '10X_P7_5', '10X_P7_6', '10X_P7_7',
'10X_P7_8', '10X_P7_9', '10X_P7_10', '10X_P7_11',
'10X_P7_12', '10X_P7_13', '10X_P7_14', '10X_P7_15']
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder')
parser.add_argument('--n_samples', type=int, default=-1)
parser.add_argument('--tissues', nargs='*', default=None)
parser.add_argument('--k', type=int, default=25)
args = parser.parse_args()
if args.tissues is None:
args.tissues = all_tissues
for tissue in args.tissues:
print(f'Processing {tissue}...')
tenx = load_tissue(tissue, args.data_folder,
channels_to_use=channels_to_use)
if not os.path.exists(os.path.join(f'{args.data_folder}',
'10x_data', 'tissues', tissue)):
os.mkdir(os.path.join(f'{args.data_folder}',
'10x_data', 'tissues', tissue))
if args.n_samples < 1:
skip = 1
file_format = os.path.join(args.data_folder, '10x_data', 'tissues',
tissue, '{}.{}')
else:
skip = tenx.genes.matrix.shape[0] // args.n_samples
skip = max(skip, 1)
file_format = os.path.join(
args.data_folder, '10x_data', 'tissues', tissue,
f'{{}}-{tissue}-{args.n_samples}-{args.k}.{{}}'
)
clusters = cluster(tenx, skip, file_format=file_format,
k=args.k, tissue=tissue)
print('Computing cluster expression...')
# genewise mean expression and percent non-zero for each cluster
cluster_expression_df = cluster_expression(tenx, clusters, skip)
# drop zeros
cluster_expression_df = cluster_expression_df.loc[
cluster_expression_df.max(axis=1) != 0
]
# round for readability and output to csv
cluster_expression_df = np.round(cluster_expression_df, 2)
cluster_expression_df.to_csv(file_format.format('cluster-expression', 'csv'))
cluster_sizes = dict(Counter(clusters).most_common())
print('Computing differential expression...')
group_list = diff_exp_clusters(cluster_expression_df, cluster_sizes,
file_format)
with open(file_format.format('summary', 'txt'), 'w') as OUT:
print('Clustered {} cells into {} clusters'.format(
sum(cluster_sizes.values()), len(cluster_sizes)),
file=OUT)
print('\t'.join(('group', 'n_cells', 'member_clusters')), file=OUT)
for i,gl in group_list[:-1]:
print('{}\t{}\t{}'.format(i, sum(cluster_sizes[j] for j in gl),
', '.join(map(str, sorted(gl)))),
file=OUT)
print('total\t{}'.format(sum(cluster_sizes.values())), file=OUT)
|
mit
|
bjackman/trappy
|
tests/test_dynamic.py
|
3
|
4590
|
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import matplotlib
from test_sched import BaseTestSched
from trappy.base import Base
import trappy
class DynamicEvent(Base):
"""Test the ability to register
specific classes to trappy"""
unique_word = "dynamic_test_key"
name = "dynamic_event"
class TestDynamicEvents(BaseTestSched):
def __init__(self, *args, **kwargs):
super(TestDynamicEvents, self).__init__(*args, **kwargs)
def test_dynamic_data_frame(self):
"""
Test if the dynamic events are populated
in the data frame
"""
parse_class = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
t = trappy.FTrace(name="first")
self.assertTrue(len(t.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(parse_class)
def test_dynamic_class_attr(self):
"""
        Test the attributes of the dynamically
generated class
"""
cls = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key",
pivot="test_pivot")
self.assertEquals(cls.__name__, "DynamicEvent")
self.assertEquals(cls.name, "dynamic_event")
self.assertEquals(cls.unique_word, "dynamic_test_key")
self.assertEquals(cls.pivot, "test_pivot")
trappy.unregister_dynamic_ftrace(cls)
def test_dynamic_event_plot(self):
"""Test if plotter can accept a dynamic class
for a template argument"""
cls = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
t = trappy.FTrace(name="first")
l = trappy.LinePlot(t, cls, column="load")
l.view(test=True)
trappy.unregister_dynamic_ftrace(cls)
def test_dynamic_event_scope(self):
"""Test the case when an "all" scope class is
        registered. It should appear in both thermal and sched
ftrace class definitions when scoped ftrace objects are created
"""
cls = trappy.register_dynamic_ftrace("DynamicEvent", "dynamic_test_key")
t1 = trappy.FTrace(name="first")
self.assertTrue(t1.class_definitions.has_key(cls.name))
trappy.unregister_dynamic_ftrace(cls)
def test_register_ftrace_parser(self):
trappy.register_ftrace_parser(DynamicEvent)
t = trappy.FTrace(name="first")
self.assertTrue(len(t.dynamic_event.data_frame) == 1)
trappy.unregister_ftrace_parser(DynamicEvent)
def test_no_none_pivot(self):
"""register_dynamic_ftrace() with default value for pivot doesn't create a class with a pivot=None"""
cls = trappy.register_dynamic_ftrace("MyEvent", "my_dyn_test_key")
self.assertFalse(hasattr(cls, "pivot"))
trappy.unregister_dynamic_ftrace(cls)
def test_unregister_dynamic_ftrace(self):
"""Test that dynamic events can be unregistered"""
dyn_event = trappy.register_dynamic_ftrace("DynamicEvent",
"dynamic_test_key")
trace = trappy.FTrace(name="first")
self.assertTrue(len(trace.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(dyn_event)
trace = trappy.FTrace(name="first")
self.assertFalse(hasattr(trace, "dynamic_event"))
dyn_event = trappy.register_dynamic_ftrace("DynamicEvent",
"dynamic_test_key",
scope="sched")
trace = trappy.FTrace(name="first")
self.assertTrue(len(trace.dynamic_event.data_frame) == 1)
trappy.unregister_dynamic_ftrace(dyn_event)
trace = trappy.FTrace(name="first")
self.assertFalse(hasattr(trace, "dynamic_event"))
def test_unregister_ftrace_parser(self):
"""unregister_ftrace_parser() works"""
trappy.register_ftrace_parser(DynamicEvent)
trappy.unregister_ftrace_parser(DynamicEvent)
trace = trappy.FTrace()
self.assertFalse(hasattr(trace, "dynamic_event"))
|
apache-2.0
|
zfrenchee/pandas
|
pandas/core/common.py
|
1
|
18758
|
"""
Misc tools for implementing data structures
"""
import sys
import warnings
from datetime import datetime, timedelta
from functools import partial
import inspect
import collections
import numpy as np
from pandas._libs import lib, tslib
from pandas import compat
from pandas.compat import long, zip, iteritems
from pandas.core.config import get_option
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
from pandas.core.dtypes.common import _NS_DTYPE
from pandas.core.dtypes.inference import _iterable_not_string
from pandas.core.dtypes.missing import isna, isnull, notnull # noqa
from pandas.api import types
from pandas.core.dtypes import common
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
# compat
from pandas.errors import ( # noqa
PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError)
# back-compat of public API
# deprecate these functions
m = sys.modules['pandas.core.common']
for t in [t for t in dir(types) if not t.startswith('_')]:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"import from the public API: "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(types, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
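# Illustrative note, not part of the original module: the wrapper is built by a
# factory function (``outer``) that is called once per loop iteration, so each
# generated ``wrapper`` closes over its own value of ``t``. Defining the
# wrapper directly in the loop would not work, because closures bind names
# late. A minimal standalone illustration of the difference:
def _late_binding_sketch():
    late = [lambda: x for x in ("a", "b")]       # both lambdas return "b"
    bound = [lambda x=x: x for x in ("a", "b")]  # return "a" and "b"
    return [f() for f in late], [f() for f in bound]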
# back-compat for non-public functions
# deprecate these functions
for t in ['is_datetime_arraylike',
'is_datetime_or_timedelta_dtype',
'is_datetimelike',
'is_datetimelike_v_numeric',
'is_datetimelike_v_object',
'is_datetimetz',
'is_int_or_datetime_dtype',
'is_period_arraylike',
'is_string_like',
'is_string_like_dtype']:
def outer(t=t):
def wrapper(*args, **kwargs):
warnings.warn("pandas.core.common.{t} is deprecated. "
"These are not longer public API functions, "
"but can be imported from "
"pandas.api.types.{t} instead".format(t=t),
DeprecationWarning, stacklevel=3)
return getattr(common, t)(*args, **kwargs)
return wrapper
setattr(m, t, outer(t))
# deprecate array_equivalent
def array_equivalent(*args, **kwargs):
warnings.warn("'pandas.core.common.array_equivalent' is deprecated and "
"is no longer public API", DeprecationWarning, stacklevel=2)
from pandas.core.dtypes import missing
return missing.array_equivalent(*args, **kwargs)
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AbstractMethodError(NotImplementedError):
"""Raise this error instead of NotImplementedError for abstract methods
while keeping compatibility with Python 2 and Python 3.
"""
def __init__(self, class_instance):
self.class_instance = class_instance
def __str__(self):
msg = "This method must be defined in the concrete class of {name}"
return (msg.format(name=self.class_instance.__class__.__name__))
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
    This doesn't consider strings to be sequences; they are yielded whole.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
def _maybe_match_name(a, b):
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, (np.datetime64, datetime)):
value = tslib.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslib.Timedelta(value)
return value
_values_from_object = lib.values_from_object
def is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isna(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
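# Illustrative sketch, not part of the original module: which inputs
# is_bool_indexer accepts as boolean masks. Lists of booleans and boolean
# ndarrays qualify; integer arrays do not.
def _is_bool_indexer_sketch():
    assert is_bool_indexer([True, False, True])
    assert is_bool_indexer(np.array([True, False]))
    assert not is_bool_indexer(np.array([1, 0]))
    return True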
def _default_index(n):
from pandas.core.index import RangeIndex
return RangeIndex(0, n, name=None)
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
msg = 'mutually exclusive arguments: {label1!r} and {label2!r}'
raise TypeError(msg.format(label1=label1, label2=label2))
elif val1 is not None:
return val1
else:
return val2
def _not_none(*args):
"""Returns a generator consisting of the arguments that are not None"""
return (arg for arg in args if arg is not None)
def _any_none(*args):
"""Returns a boolean indicating if any argument is None"""
for arg in args:
if arg is None:
return True
return False
def _all_none(*args):
"""Returns a boolean indicating if all arguments are None"""
for arg in args:
if arg is not None:
return False
return True
def _any_not_none(*args):
"""Returns a boolean indicating if any argument is not None"""
for arg in args:
if arg is not None:
return True
return False
def _all_not_none(*args):
"""Returns a boolean indicating if all arguments are not None"""
for arg in args:
if arg is None:
return False
return True
def _count_not_none(*args):
"""Returns the count of arguments that are not None"""
return sum(x is not None for x in args)
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def iterpairs(seq):
"""
Parameters
----------
seq : sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
>>> list(iterpairs([1, 2, 3, 4]))
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
if not val: # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
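# Usage sketch: unlike itertools.groupby, this groupby collects values into a
# dict keyed by key(value), so the input does not need to be pre-sorted.
# >>> grouped = groupby(['apple', 'banana', 'avocado'], key=lambda s: s[0])
# >>> sorted(grouped.items())
# [('a', ['apple', 'avocado']), ('b', ['banana'])]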
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return {x: i for i, x in enumerate(arr)}
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return construct_1d_object_array_from_listlike(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
# Avoid building an array of arrays:
# TODO: verify whether any path hits this except #18819 (invalid)
values = [tuple(x) for x in values]
result = construct_1d_object_array_from_listlike(values)
return result
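# Behaviour sketch: _asarray_tuplesafe keeps tuples as scalar elements of an
# object array instead of letting numpy broadcast them into a 2-D array, and
# string data is likewise forced to object dtype.
# >>> _asarray_tuplesafe([(1, 2), (3, 4)])
# array([(1, 2), (3, 4)], dtype=object)
# >>> _asarray_tuplesafe(['a', 'b'])
# array(['a', 'b'], dtype=object)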
def _index_labels_to_array(labels, dtype=None):
"""
Transform label or iterable of labels to array, for use in Index.
Parameters
----------
dtype : dtype
If specified, use as dtype of the resulting array, otherwise infer.
Returns
-------
array
"""
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels, dtype=dtype)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
obj.stop is None and obj.step is None)
def is_true_slices(l):
"""
Find non-trivial slices in "l": return a list of booleans with same length.
"""
return [isinstance(k, slice) and not is_null_slice(k) for k in l]
def is_full_slice(obj, l):
""" we have a full length slice """
return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and
obj.step is None)
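# Quick examples of the slice predicates above:
# >>> is_null_slice(slice(None))
# True
# >>> is_full_slice(slice(0, 5), 5)
# True
# >>> is_full_slice(slice(0, 4), 5)
# False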
def _get_callable_name(obj):
# typical case has name
if hasattr(obj, '__name__'):
return getattr(obj, '__name__')
# some objects don't; could recurse
if isinstance(obj, partial):
return _get_callable_name(obj.func)
# fall back to class name
if hasattr(obj, '__call__'):
return obj.__class__.__name__
# everything failed (probably because the argument
# wasn't actually callable); we return None
# instead of the empty string in this case to allow
# distinguishing between no name and a name of ''
return None
def _apply_if_callable(maybe_callable, obj, **kwargs):
"""
Evaluate possibly callable input using obj and kwargs if it is callable,
otherwise return as it is
Parameters
----------
maybe_callable : possibly a callable
obj : NDFrame
**kwargs
"""
if callable(maybe_callable):
return maybe_callable(obj, **kwargs)
return maybe_callable
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
def _dict_compat(d):
"""
Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict
Parameters
----------
d: dict like object
Returns
-------
dict
"""
return dict((_maybe_box_datetimelike(key), value)
for key, value in iteritems(d))
def standardize_mapping(into):
"""
Helper function to standardize a supplied mapping.
.. versionadded:: 0.21.0
Parameters
----------
into : instance or subclass of collections.Mapping
Must be a class, an initialized collections.defaultdict,
or an instance of a collections.Mapping subclass.
Returns
-------
mapping : a collections.Mapping subclass or other constructor
a callable object that can accept an iterator to create
the desired Mapping.
See Also
--------
DataFrame.to_dict
Series.to_dict
"""
if not inspect.isclass(into):
if isinstance(into, collections.defaultdict):
return partial(
collections.defaultdict, into.default_factory)
into = type(into)
if not issubclass(into, collections.Mapping):
raise TypeError('unsupported type: {into}'.format(into=into))
elif into == collections.defaultdict:
raise TypeError(
'to_dict() only accepts initialized defaultdicts')
return into
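# Usage sketch: classes pass through (after a Mapping check), instances are
# converted to their type, and initialized defaultdicts keep their factory.
# >>> standardize_mapping(dict)
# <class 'dict'>
# >>> standardize_mapping(collections.OrderedDict())
# <class 'collections.OrderedDict'>
# >>> standardize_mapping(collections.defaultdict(list))
# functools.partial(<class 'collections.defaultdict'>, <class 'list'>)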
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
# ----------------------------------------------------------------------
# Detect our environment
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main() # noqa
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
.. deprecated:: 0.14.1
This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
.. deprecated:: 0.14.1
This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython() # noqa
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', ""))
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
    check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython() # noqa
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
def _random_state(state=None):
"""
Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState
"""
if types.is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
elif state is None:
return np.random
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None")
def _get_distinct_objs(objs):
"""
Return a list with distinct elements of "objs" (different ids).
Preserves order.
"""
ids = set()
res = []
for obj in objs:
if not id(obj) in ids:
ids.add(id(obj))
res.append(obj)
return res
def _pipe(obj, func, *args, **kwargs):
"""
Apply a function ``func`` to object ``obj`` either by passing obj as the
first argument to the function or, in the case that the func is a tuple,
interpret the first element of the tuple as a function and pass the obj to
that function as a keyword argument whose key is the value of the second
element of the tuple.
Parameters
----------
func : callable or tuple of (callable, string)
Function to apply to this object or, alternatively, a
``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of ``callable`` that expects the
object.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : dict, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = '%s is both the pipe target and a keyword argument' % target
raise ValueError(msg)
kwargs[target] = obj
return func(*args, **kwargs)
else:
return func(obj, *args, **kwargs)
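# Calling-convention sketch for _pipe, with hypothetical stand-ins ``df`` and
# ``my_func`` (neither is defined in this module):
# >>> _pipe(df, my_func, 1, key='a')   # equivalent to my_func(df, 1, key='a')
# >>> _pipe(df, (my_func, 'data'), 1)  # equivalent to my_func(1, data=df)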
|
bsd-3-clause
|
robclewley/fovea
|
examples/bombardier/bombardier.py
|
1
|
30738
|
"""
Rocket multi-body simulation inspired by
Bombardiers' Guild mobile app game
"""
from __future__ import division
import os
from PyDSTool import *
import PyDSTool.Toolbox.phaseplane as pp
import PyDSTool as dst # for potentially generalizable functions and classes to use
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RectangleSelector
import fovea
import fovea.domain2D as dom
from fovea import common, prep
import fovea.graphics as gx
gentype = 'vode'
# let rocket mass be negligible
# and G chosen to absorb m_rocket
# grav constant
G = 35
# Generic scale separation threshold
scale_thresh = 3.0
# graphics / control params
xdomain_halfwidth = .7 # should derive from YAML setup in calling script
maxangle = 80 # degrees
da_dict = dict(zip( ('h','H','j','J','n','m'),
(-1, -10, 1, 10, -0.1, 0.1)))
dv_dict = dict(zip( ('d','D','f','F','c','v'),
(-1, -10, 1, 10, -0.1, 0.1)))
# !!
# These don't work with new Bombardier implementation
#print("Change angle keys:")
#print(da_dict)
#print("Change velocity keys:")
#print(dv_dict)
# other keys used in GUIrocket:
# l = make a line of interest (click-drag-release)
# SPACE = measure forces at clicked mouse point
# s = snap clicked mouse point to closest point on trajectory
# . (o) = grow a 2D domain
# g = GO! (run simulation)
dom_key = '.'
change_mouse_state_keys = ['l', 's', ' '] + [dom_key]
# Disable mpl interactive key defaults
plt.rcParams['keymap.save'] = 'ctrl+s' # s
plt.rcParams['keymap.xscale'] = '' # l
plt.rcParams['keymap.yscale'] = '' # L
plt.rcParams['keymap.back'] = ['left', 'backspace'] # c
plt.rcParams['keymap.forward'] = 'right' # v
plt.rcParams['keymap.zoom'] = '' # o
plt.rcParams['keymap.grid'] = '' # g
# initial value
next_fighandle = 1
_non_pickle_attr = ['ax', 'fig',
'trajline', 'startpt', 'endpt', 'quartiles',
'widgets',
'RS_line']
# attributes that themselves contain non-picklable objects
_non_pickle_subattr = ['context_objects', # e.g. line_GUI
'tracked_objects'] # e.g. measure with dynamic fns
class GUIrocket(gx.diagnosticGUI):
def __init__(self, bodies, title, axisbgcol='black'):
"""
bodies is a dict-like mapping of 1 or more:
<ID int>: {'density': <float>,
'radius': <float>,
'position': [<float>, <float>]}
"""
global next_fighandle
plotter = gx.Plotter()
gx.diagnosticGUI.__init__(self, plotter)
self.current_domain_handler = dom.GUI_domain_handler(self)
self._event_num = 1 # for line_to_event counter
# --- SPECIFIC TO BOMBARDIER
# Setup shoot params
self.vel = 0.8
self.ang = 0
self.da = 0.005
self.dv = 0.0005
# used for color-coding trajectories by speed
self.maxspeed = 2.2
# one time graphics setup
# for plot handles
self.trajline = None
self.startpt = None
self.endpt = None
self.quartiles = None
# Axes background colour
self.axisbgcol = axisbgcol
#Setup code
DOI = [(-xdomain_halfwidth,xdomain_halfwidth),(0,1)]
self.clean() # in case rerun in same session
self.add_fig('master',
title='Bombardier',
xlabel='x', ylabel='y',
domain=DOI)
#Setup all layers
self.add_layer('trajs')
self.add_layer('bodies', kind='patch')
self.add_layer('text', kind='text')
self.name = 'gamespace'
self.setup({'11':
{'name': self.name,
'scale': DOI,
'layers':['trajs', 'bodies', 'text'],
'callbacks':'*',
'axes_vars': ['x', 'y']
}
},
size=(9, 7), with_times=False, basic_widgets=False)
self.fignum = 1
fig_struct, fig = self.plotter._resolve_fig('master')
self.ax = fig_struct.arrange['11']['axes_obj']
self.add_widget(Slider, callback=self.updateAng, axlims = (0.1, 0.055, 0.65, 0.03),
label='Shoot Angle', valmin= -maxangle, valmax= maxangle,
valinit= self.ang, color='b', dragging=False, valfmt='%2.3f')
self.add_widget(Slider, callback=self.updateVel, axlims=(0.1, 0.02, 0.65, 0.03),
label='Shoot Speed', valmin=0.01, valmax=2,
valinit=self.vel, color='b',
dragging=False, valfmt='%1.4f')
# assume max of N-2 planetoid bodies + target + source
self.N = len(bodies)
self.gen_versioner = common.gen_versioner(os.path.abspath('.'),
self.name,
'simgen_N%i'%self.N,
gentype, 1)
# Make this more generic for ABC
self.setup_pars(bodies)
# --- END OF BOMBARDIER SPECIFICS
# Move these to a _recreate method than can be reused for un-pickling
self.add_widget(Button, callback=self.go, axlims=(0.005, 0.1, 0.045, 0.03),
label='Go!')
self.add_widget(Button, callback=self.line_to_event,
axlims=(0.005, 0.15, 0.08, 0.03), label='Event!')
# context_changed flag set when new objects created using declare_in_context(),
# and unset when Generator is created with the new context code included
self.context_changed = False
self.setup_gen(self.model_namer)
self.mouse_cid = None # event-connection ID
self.go(run=False)
# force call to graphics_refresh because run=False above
self.graphics_refresh(cla=False)
# TEMP
#plt.show()
# next_fighandle for whenever a new model is put in a new figure (new game instance)
next_fighandle += 1
def graphics_refresh(self, cla=True):
if cla:
self.ax.cla()
#Make quartiles
xquarts = Point({'x': 4})
yquarts = Point({'y': 4})
try:
n = len(self.points)
coorddict = {'xq':
{'x':'xq', 'y':'yq', 'layer':'trajs', 'name':'quarts1', 'style':'kd'}
}
quarts = Pointset({'coordarray': np.array([[self.points['x'][int(0.25*n)], self.points['x'][int(0.5*n)], self.points['x'][int(0.75*n)]],
[self.points['y'][int(0.25*n)], self.points['y'][int(0.5*n)], self.points['y'][int(0.75*n)]]]),
'coordnames': ['xq', 'yq']})
self.add_data_points(quarts, coorddict=coorddict)
except TypeError:
pass
#Traj Pointset
coorddict = {'x':
{'x':'x', 'y':'y','layer':'trajs','name':'data1', 'object':'collection'},
#{'x':'x', 'y':'y','layer':'trajs', 'object':'collection'},
'speed':
{'map_color_to':'x'}
}
self.add_data_points(self.points, coorddict=coorddict)
#Bodies Pointset
bodsPoints = Pointset({'coordarray': np.array([[self.pos[i][0] for i in range(len(self.pos))],
[self.pos[i][1] for i in range(len(self.pos))],
[self.radii[i] for i in range(len(self.radii))]]),
'coordnames': ['px', 'py', 'radii']})
coorddict = {'px':
{'x':'px', 'y':'py','layer':'bodies','name':'bods1', 'style':'g', 'object':'circle'},
'radii':
{'map_radius_to':'px'}
}
self.add_data_points(bodsPoints, coorddict=coorddict)
pos = np.array(self.pos).transpose()
for i in range(len(pos[0])):
self.plotter.add_text(pos[0][i], pos[1][i], i, style='k', layer='text')
self.show(rebuild=False)
#def declare_in_context(self, con_obj):
## context_changed flag set when new objects created and unset when Generator is
## created with the new context code included
#self.context_changed = True
#self.context_objects.append(con_obj)
def __str__(self):
return self.name
def setup_pars(self, data):
# Should generalize to non-bombardier application
N = self.N
radii = {}
density = {}
pos = {}
for i, body in data.items():
pos[i] = pp.Point2D(body['position'][0], body['position'][1],
labels={'body': i})
radii[i] = body['radius']
density[i] = body['density']
ixs = range(N)
self.radii = [radii[i] for i in ixs]
self.density = [density[i] for i in ixs]
self.pos = [pos[i] for i in ixs] # planet positions
self.masses = [density[i]*np.pi*r*r for (i,r) in enumerate(self.radii)]
rdict = dict([('r%i' %i, self.radii[i]) for i in ixs])
mdict = dict([('m%i' %i, self.masses[i]) for i in ixs])
posxdict = dict([('bx%i' %i, pos[i][0]) for i in ixs])
posydict = dict([('by%i' %i, pos[i][1]) for i in ixs])
pardict = {'G': G} # global param for gravitational constant
pardict.update(rdict)
pardict.update(mdict)
pardict.update(posxdict)
pardict.update(posydict)
self.body_pars = pardict
self.icpos = np.array((0.0, 0.08))
self.icvel = np.array((0.0, 0.0))
def make_gen(self, pardict, name):
# scrape GUI diagnostic object extras for generator
extra_events = []
extra_fnspecs = {}
extra_pars = {}
extra_auxvars = {}
for con_obj in self.context_objects.values():
extra_events.append(con_obj.extra_events)
extra_fnspecs.update(con_obj.extra_fnspecs)
extra_pars.update(con_obj.extra_pars)
extra_auxvars.update(con_obj.extra_auxvars)
Fx_str = ""
Fy_str = ""
for i in range(self.N):
Fx_str += "-G*m%i*(x-bx%i)/pow(d(x,y,bx%i,by%i),3)" % (i,i,i,i)
Fy_str += "-G*m%i*(y-by%i)/pow(d(x,y,bx%i,by%i),3)" % (i,i,i,i)
DSargs = args()
DSargs.varspecs = {'vx': Fx_str, 'x': 'vx',
'vy': Fy_str, 'y': 'vy',
'Fx_out': 'Fx(x,y)', 'Fy_out': 'Fy(x,y)',
'speed': 'sqrt(vx*vx+vy*vy)',
'bearing': '90-180*atan2(vy,vx)/pi'}
DSargs.varspecs.update(extra_auxvars)
auxfndict = {'Fx': (['x', 'y'], Fx_str),
'Fy': (['x', 'y'], Fy_str),
'd': (['xx', 'yy', 'x1', 'y1'], "sqrt((xx-x1)*(xx-x1)+(yy-y1)*(yy-y1))")
}
DSargs.auxvars = ['Fx_out', 'Fy_out', 'speed', 'bearing'] + \
list(extra_auxvars.keys())
DSargs.pars = pardict
DSargs.pars.update(extra_pars)
DSargs.fnspecs = auxfndict
DSargs.fnspecs.update(extra_fnspecs)
DSargs.algparams = {'init_step':0.001,
'max_step': 0.01,
'max_pts': 20000,
'maxevtpts': 2,
'refine': 5}
targetlang = \
self.gen_versioner._targetlangs[self.gen_versioner.gen_type]
# Events for external boundaries (left, right, top, bottom)
Lev = Events.makeZeroCrossEvent('x+%f'%xdomain_halfwidth, -1,
{'name': 'Lev',
'eventtol': 1e-5,
'precise': True,
'term': True},
varnames=['x'],
targetlang=targetlang)
Rev = Events.makeZeroCrossEvent('x-%f'%xdomain_halfwidth, 1,
{'name': 'Rev',
'eventtol': 1e-5,
'precise': True,
'term': True},
varnames=['x'],
targetlang=targetlang)
Tev = Events.makeZeroCrossEvent('y-1', 1,
{'name': 'Tev',
'eventtol': 1e-5,
'precise': True,
'term': True},
varnames=['y'],
targetlang=targetlang)
Bev = Events.makeZeroCrossEvent('y', -1,
{'name': 'Bev',
'eventtol': 1e-5,
'precise': True,
'term': True},
varnames=['y'],
targetlang=targetlang)
# Events for planetoids
bevs = []
for i in range(self.N):
bev = Events.makeZeroCrossEvent('d(x,y,bx%i,by%i)-r%i' % (i,i,i),
-1,
{'name': 'b%iev' %i,
'eventtol': 1e-5,
'precise': True,
'term': True},
varnames=['x','y'],
parnames=list(pardict.keys()),
fnspecs=auxfndict,
targetlang=targetlang)
bevs.append(bev)
DSargs.events = [Lev, Rev, Tev, Bev] + bevs + extra_events
DSargs.checklevel = 2
DSargs.ics = {'x': self.icpos[0], 'y': self.icpos[1],
'vx': 0., 'vy': 1.5}
DSargs.name = name
DSargs.tdomain = [0, 10000]
DSargs.tdata = [0, 50]
# turns arguments into Generator then embed into Model object
self.model = self.gen_versioner.make(DSargs)
def line_to_event(self, e):
ltarget = self.selected_object
ltarget.make_event_def('target%i' % self._event_num, 0)
self.setup_gen(self.model_namer)
# make event terminal
self.model.setDSEventTerm('gen', 'exit_ev_target%i' % self._event_num, True)
self._event_num += 1
def go(self, run=True):
"""
Note: This method can only start a trajectory from the
launcher at the bottom of the screen!
To shoot from a specific point that's been set by hand,
call self.run() then self.graphics_refresh(cla=False)
"""
a = self.ang
v = self.vel
# Angle a of shooting is relative to vertical, up to +/- maxangle degrees
if a > maxangle:
# assume is vestigial from a different initial condition
a = maxangle
elif a < -maxangle:
a = -maxangle
rad = pi*(a-90)/180.
x = self.radii[0]*cos(rad)
y = -self.radii[0]*sin(rad)
vx = v*cos(rad)
vy = -v*sin(rad)
self.model.set(ics={'vx': vx, 'vy': vy,
'x': x, 'y': y})
if run:
self.run()
self.graphics_refresh(cla=False)
self.masterWin.canvas.draw()
plt.draw()
def set(self, pair, ic=None, by_vel=False):
"""Set solution pair (ang, speed) and optional (x,y)
initial condition, where ang is in degrees.
With option by_vel=True (default False),
the pair will be treated as (vx, vy) instead
"""
assert len(pair) == 2
if ic is not None:
assert 'x' in ic and 'y' in ic and len(ic) == 2
self.model.set(ics=ic)
self.icpos = ic
if by_vel:
vx, vy = pair
# both conversions in this section are -90?
self.ang = 180*atan2(vy,vx)/pi - 90
self.vel = sqrt(vx*vx+vy*vy)
else:
# can't set ang and vel according to rules for regular
# shooting because we are reconstructing a partial
# trajectory out in space
self.ang, self.vel = pair
rad = pi*(self.ang-90)/180.
vx = self.vel*cos(rad)
vy = -self.vel*sin(rad)
self.model.set(ics={'vx': vx, 'vy': vy})
else:
self.setAng(pair[0])
self.setVel(pair[1])
def setAng(self, ang):
self.widgets['Shoot Angle'].set_val(ang)
def setVel(self, vel):
self.widgets['Shoot Speed'].set_val(vel)
def updateAng(self, ang):
if ang < -maxangle:
ang = -maxangle
elif ang > maxangle:
ang = maxangle
self.ang = ang
self.go(run=False)
def updateVel(self, vel):
if vel < 0.01:
print("Velocity must be >= 0.01")
vel = 0.01
self.vel = vel
self.go(run=False)
def run(self, tmax=None):
self.model.compute('test', force=True)
self.traj = self.model.trajectories['test']
self.add_data_traj(self.traj)
self.pts = self.points #Shouldn't have to do this.
if self.calc_context is not None:
# Update calc context
self.calc_context()
def get_forces(self, x, y):
"""
For given x, y coord arguments, returns two dictionaries keyed
by body number (1-N):
net force magnitude, force vector
"""
# Bombardier specific
Fxs = []
Fys = []
Fs = []
pars = self.model.query('pars')
ixs = range(self.N)
for i in ixs:
m = pars['m%i'%i]
bx = pars['bx%i'%i]
by = pars['by%i'%i]
p = pow(pp.distfun(x,y,bx,by),3)
Fx = -m*(x-bx)/p
Fy = -m*(y-by)/p
Fxs.append(Fx)
Fys.append(Fy)
Fs.append(sqrt(Fx*Fx+Fy*Fy))
return dict(zip(ixs, Fs)), dict(zip(ixs, zip(Fxs, Fys)))
def set_planet_data(self, n, data):
assert n in range(self.N)
# default to old radius, unless updated (for masses)
r = self.model.query('pars')['r%i'%n]
d = self.density[n]
pardict = {}
for key, val in data.items():
if key == 'r':
pardict['r%i'%n] = val
r = val
self.radii[n] = r
elif key == 'x':
pardict['bx%i'%n] = val
p = self.pos[n]
self.pos[n] = (val, p.y)
elif key == 'y':
pardict['by%i'%n] = val
p = self.pos[n]
self.pos[n] = (p.x, val)
elif key == 'd':
d = val
self.density[n] = d
else:
raise ValueError("Invalid parameter key: %s"%key)
pardict['m%i'%n] = G*d*np.pi*r*r
self.model.set(pars=pardict)
self.body_pars.update(pardict)
self.trajline = None
self.startpt = None
self.endpt = None
self.go(run=False)
self.graphics_refresh()
def model_namer(self):
name = 'sim_N%i'%self.N+'_fig%i'%self.fignum
return name
# also make a circular version (using radial event)
class target4D_line(qt_feature_leaf):
"""
Parameters expected:
--------------------
pt1, pt2 = Point2D specifying start and end of line in physical space
speed, bearing = Interval objects for speed and bearing (may be singletons)
N.B. bearing is measured relative to the perpendicular to the angle
    of the line, for convenience. I.e., 0 is fully transverse to the line,
    in the direction of event detection, so typical intervals are [-45, 45]
loc_event_name = name of zero-crossing event detecting goal location
Assumptions:
------------
System contains a uni-directional non-terminal event for the physical location
"""
def evaluate(self, target):
# target should be a model interface object
ptsFS = target.test_traj.sample()
pt1 = self.pars.pt1
pt2 = self.pars.pt2
speed_inter = self.pars.speed_inter
bearing_inter = self.pars.bearing_inter
ev_name = self.pars.loc_event_name
# Compute metric for closeness along, and perpendicular to,
# pt1-pt2 line.
# Did zero crossing event occur with direction consistent with heading?
event_ix_dict = ptsFS.labels.by_label['Event:'+ev_name]
if event_ix_dict is not None:
if len(event_ix_dict) != 1:
raise ValueError
else:
ix = list(event_ix_dict.keys())[0]
check_time = ptsFS['t'][ix]
event_pt = ptsFS[ix]
else:
conditions_met = False
# how close did the trajectory get, perpendicularly?
print("Failed")
return False
# check that perp distance of event_pt to pt1-pt2 line is < epsilon
# tolerance of event
# model = target.get('events', event_pt, check_time)
ev = target.query('events')[ev_name]
# initial value for conditions_met ...
conditions_met = pp.distance_to_line(event_pt[['x','y']], (pt1, pt2)) < ev.eventtol # ?
# get distance of event_pt (x,y) coords to pt1 and pt2
# if either is > |pt1-pt2| then outside of sub-domain
line_seg_len = np.linalg.norm(pt2 - pt1)
dist1 = np.linalg.norm(event_pt[['x','y']] - pt1)
dist2 = np.linalg.norm(event_pt[['x','y']] - pt2)
conditions_met = conditions_met and ((dist1 < line_seg_len) and (dist2 < line_seg_len))
# test if event_pt speed in speed_inter and bearing in bearing_inter
conditions_met = conditions_met and event_pt['speed'] \
in self.pars.speed_inter
# define the sense of the line's angle based on event direction:
# +1 perpendicular is angle 0
line_vec = pt2 - pt1
n = pp.get_orthonormal(line_vec)
vel_vec = pp.Point2D({'x': event_pt['vx'], 'y': event_pt['vy']})
speed = event_pt['speed']
vel_vec = vel_vec/speed # normalized to unit length
n_dot_v = np.dot(n, vel_vec)
if n_dot_v < 0:
# sense of n is backwards
n = -n
rel_bearing = pp.get_bearing(n, vel_vec)
conditions_met = conditions_met and rel_bearing \
in self.pars.bearing_inter
# compute metric for closeness of velocity magnitude and direction
return conditions_met
# ===========================================
def combine_planets(body_data, body1, body2):
"""
Return effective combined single body from two bodies given
as integers
"""
r1 = body_data[body1]['radius']
r2 = body_data[body2]['radius']
m1 = np.pi*r1*r1
m2 = np.pi*r2*r2
d1 = body_data[body1]['density']
d2 = body_data[body2]['density']
pos1 = pp.Point2D(body_data[body1]['position'][0], body_data[body1]['position'][1])
pos2 = pp.Point2D(body_data[body2]['position'][0], body_data[body2]['position'][1])
dp = pos2 - pos1
new_pos = pos2 - m1/(m1+m2)*dp
    # mass-weighted mean of densities
new_density = (m1*d1 + m2*d2)/(m1+m2)
new_radius = float(sqrt((m1+m2)/new_density/np.pi))
# mass is redundant
return {'radius': new_radius, 'density': new_density,
'mass': m1+m2, 'position': [new_pos[0], new_pos[1]]}
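# Worked example (hypothetical numbers): merging two unit-density bodies of
# radius 1 and 2 centred at x=0 and x=3. Masses scale with area (pi*r*r), so
# the merged body sits at the centre of mass x=2.4, keeps density 1, and has
# radius sqrt(5) ~= 2.236.
# >>> bodies = {1: {'radius': 1., 'density': 1., 'position': [0., 0.]},
# ...           2: {'radius': 2., 'density': 1., 'position': [3., 0.]}}
# >>> combined = combine_planets(bodies, 1, 2)
# >>> combined['radius'], combined['position']   # ~ (2.236, [2.4, 0.0])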
def make_forcelines(sim, x, y, i):
#flines = []
#for n in range(sim.N):
# i = n-1
#Fsi = Fs[i]
#Fvi_x, Fvi_y = Fvecs[i]
bx, by = sim.pos[i]
line_i = gx.line_GUI(sim, x, y, bx, by)
return line_i
def relative_angle(line1, line2):
# to help project line 1 onto line 2
return line1.ang - line2.ang
def project(sim, x, y, i, pline):
fline = make_forcelines(sim, x, y, i)
Fs, Fvecs = sim.get_forces(x,y)
F = Fs[i]
ra = relative_angle(fline, pline)
print(fline.ang_deg - pline.ang_deg)
proj_F = F*cos(ra)
bx, by = sim.pos[i]
proj_d = pp.distfun(x,y,bx,by)*cos(ra)
return proj_F, proj_d
def net_force_vs_line(sim, mesh_n=100):
"""
Computes net forces of all bodies projected onto the line
and to its normal direction.
The line is assumed to be currently selected in the sim
Mesh size given by optional argument mesh_n (default 100)
"""
line = sim.selected_object
if line is None:
raise ValueError("No line selected")
x1 = line.x1
y1 = line.y1
dx = line.dx
dy = line.dy
forces = np.zeros(mesh_n)
forces_normal = np.zeros(mesh_n)
for i, d in enumerate(np.linspace(0,1,mesh_n)):
x = x1 + d*dx
y = y1 + d*dy
Fs, Fvecs = sim.get_forces(x,y)
        net_Fvec = np.sum(list(Fvecs.values()), 0)  # list() so Python 3 dict views sum correctly
net_F = np.linalg.norm(net_Fvec)
ang_Fvec = atan2(net_Fvec[1], net_Fvec[0])
rel_ang = line.ang - ang_Fvec
forces[i] = net_F*cos(rel_ang)
forces_normal[i] = net_F*sin(rel_ang)
return forces, forces_normal
def net_force_along_pts(sim, pts, bodies=None):
"""
Raw force magnitudes along a pointset (not projected)
for the selected bodies (by #), as well as total net force
due to *all* bodies present
If bodies is None (default) then all bodies with positive mass
will be assumed.
"""
num_pts = len(pts)
if bodies is None:
bodies = [i for i, d in enumerate(sim.density) if d > 0]
Fs_by_body = {}
net_Fs = np.zeros(num_pts)
for n in bodies:
Fs_by_body[n] = np.zeros(num_pts)
for i, (x,y) in enumerate(pts[['x','y']].coordarray.T):
Fs, Fvecs = sim.get_forces(x,y)
for n in bodies:
Fs_by_body[n][i] = Fs[n]
try:
net_Fs[i] = np.linalg.norm(np.sum(list(Fvecs.values()), 0))
except TypeError:
print(Fvecs.values())
print(np.sum(list(Fvecs.values()), 0))
return net_Fs, Fs_by_body
def plot_forces_along_line(sim, forces, fignum=2):
line = sim.selected_object
if line is None:
raise ValueError("No line selected")
plt.figure(fignum)
mesh_n = len(forces)
xaxis_range = linspace(0, line.length, mesh_n)
plt.plot(xaxis_range, forces)
plt.xlabel('distance along line')
plt.ylabel('net force')
plt.title('Forces vs. line %s' % line.name)
plt.hlines(0, 0, line.length)
axis('tight')
def pericenter_vs_n(sim, n, ecc, pt=None):
# closest passing point relative to body n (index n-1)
# at given point (or else IC assumed)
if pt is None:
pt = sim.model.query('ics')
p0 = np.array((pt['x'], pt['y'])) # e.g. (0.25, 0.3)
r0 = sim.pos[n]-p0
v0 = np.array((pt['vx'], pt['vy']))
v = np.linalg.norm(v0)
r = np.linalg.norm(r0)
m = sim.masses[n]
mu = G*m
# abs takes into account sign convention for hyperbolae
# vs. ellipses
eps = abs(v*v/2 - mu/r)
a = mu / (2*eps)
return abs((1-ecc)*a)
def apicenter_vs_n(sim, n, ecc, pt=None):
# furthest point relative to body n (index n-1)
# (assuming elliptical orbit, otherwise infinity is returned)
# at given point (or else IC assumed)
if pt is None:
pt = sim.model.query('ics')
if ecc >= 1:
return np.Inf
p0 = np.array((pt['x'], pt['y'])) # e.g. (0.25, 0.3)
r0 = sim.pos[n]-p0
v0 = np.array((pt['vx'], pt['vy']))
v = np.linalg.norm(v0)
r = np.linalg.norm(r0)
m = sim.masses[n]
mu = G*m
# abs takes into account sign convention for hyperbolae
# vs. ellipses
eps = abs(v*v/2 - mu/r)
a = mu / (2*eps)
return abs((1+ecc)*a)
def eccentricity_vs_n(sim, n, pt=None):
# relative to body n (index n-1)
# at given point (or else IC assumed)
if pt is None:
pt = sim.model.query('ics')
p0 = np.array((pt['x'], pt['y'])) # e.g. (0.25, 0.3)
v0 = np.array((pt['vx'], pt['vy']))
r0 = sim.pos[n]-p0
r_cross_v0 = float(np.cross(r0, v0))
m = sim.masses[n]
mu = G*m
v = np.linalg.norm(v0)
r = np.linalg.norm(r0)
e = sqrt(1+(v*v/(mu*mu) - 2/(r*mu))*(r_cross_v0)**2)
return e
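# Reference for the three helpers above: they implement the standard two-body
# relations for a satellite of negligible mass around body n, with
# gravitational parameter mu = G*m and specific angular momentum h = r0 x v0:
#     eps    = |v**2/2 - mu/r|                      (specific orbital energy)
#     a      = mu / (2*eps)                         (semi-major axis)
#     e      = sqrt(1 + (v**2/mu**2 - 2/(r*mu))*h**2)   (eccentricity)
#     r_peri = |(1 - e)*a|,  r_api = |(1 + e)*a|    (closest / furthest approach)
# The absolute values absorb the sign convention distinguishing hyperbolic
# from elliptical orbits, as noted in the comments above.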
class body_context(fovea.calc_context):
"""
Calculation context for a single planetary body with small satellite
of negligible mass. n specifies which body.
"""
def local_init(self, n, pt=None):
self._refresh_init_args = (n, pt)
if pt is None:
pt = self.sim.model.query('ics')
w = self.workspace
w.pt = pt
w.p0 = np.array((pt['x'], pt['y'])) # e.g. (0.25, 0.3)
w.v0 = np.array((pt['vx'], pt['vy']))
w.r0 = self.sim.pos[n]-w.p0
# np.double might be easier to make work with Symbolic
w.r_cross_v0 = np.double(np.cross(w.r0, w.v0))
w.m = self.sim.masses[n]
# ISSUE: G is a global!
w.mu = G*w.m
w.v = np.linalg.norm(w.v0)
w.r = np.linalg.norm(w.r0)
class calc_context_forces(fovea.calc_context):
def local_init(self):
self.workspace.net_Fs, self.workspace.Fs_by_body = \
net_force_along_pts(self.sim, self.sim.pts)
##def contextualize(context):
## def decorator(target):
## def wrapper():
## return target(context)
## return wrapper
## return decorator
##
##@contextualize(my_context(game4, 1))
##def eccentricity(con):
## return 1 # test
##
##def attach(con, fn):
## def wrapped_fn():
## return fn(con)
## con.__dict__[fn.__name__] = wrapped_fn
##attach(con4, eccentricity)
@prep('ecc')
def eccentricity(con):
return sqrt(1+(con.workspace.v*con.workspace.v/(con.workspace.mu*con.workspace.mu) \
- 2/(con.workspace.r*con.workspace.mu))*(con.workspace.r_cross_v0)**2)
@prep('eps')
def total_energy(con):
# abs takes into account sign convention for hyperbolae
# vs. ellipses
return abs(con.workspace.v*con.workspace.v/2 - con.workspace.mu/con.workspace.r)
@prep('a')
def semimajor(con):
return con.workspace.mu / (2*con.workspace.eps)
@prep('api')
def apicenter(con):
return abs((1+con.workspace.ecc)*con.workspace.a)
@prep('peri')
def pericenter(con):
return abs((1-con.workspace.ecc)*con.workspace.a)
|
bsd-3-clause
|
alphaBenj/zipline
|
zipline/pipeline/loaders/earnings_estimates.py
|
1
|
63291
|
from abc import abstractmethod, abstractproperty
import numpy as np
import pandas as pd
from six import viewvalues
from toolz import groupby
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import (
Datetime641DArrayOverwrite,
Datetime64Overwrite,
Float641DArrayOverwrite,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype
from zipline.pipeline.loaders.utils import (
ffill_across_cols,
last_in_date_group
)
INVALID_NUM_QTRS_MESSAGE = "Passed invalid number of quarters %s; " \
"must pass a number of quarters >= 0"
NEXT_FISCAL_QUARTER = 'next_fiscal_quarter'
NEXT_FISCAL_YEAR = 'next_fiscal_year'
NORMALIZED_QUARTERS = 'normalized_quarters'
PREVIOUS_FISCAL_QUARTER = 'previous_fiscal_quarter'
PREVIOUS_FISCAL_YEAR = 'previous_fiscal_year'
SHIFTED_NORMALIZED_QTRS = 'shifted_normalized_quarters'
SIMULATION_DATES = 'dates'
def normalize_quarters(years, quarters):
return years * 4 + quarters - 1
def split_normalized_quarters(normalized_quarters):
years = normalized_quarters // 4
quarters = normalized_quarters % 4
return years, quarters + 1
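# Quick sketch: quarters are flattened onto a single integer axis so that
# "shift by N announcements" becomes plain integer arithmetic, and the
# mapping is invertible.
# >>> normalize_quarters(2014, 1)
# 8056
# >>> split_normalized_quarters(8057)
# (2014, 2)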
# These metadata columns are used to align event indexers.
metadata_columns = frozenset({
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
})
def required_estimates_fields(columns):
"""
Compute the set of resource columns required to serve
`columns`.
"""
# We also expect any of the field names that our loadable columns
# are mapped to.
return metadata_columns.union(viewvalues(columns))
def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
)
def add_new_adjustments(adjustments_dict,
adjustments,
column_name,
ts):
try:
adjustments_dict[column_name][ts].extend(adjustments)
except KeyError:
adjustments_dict[column_name][ts] = adjustments
class EarningsEstimatesLoader(PipelineLoader):
"""
An abstract pipeline loader for estimates data that can load data a
variable number of quarters forwards/backwards from calendar dates
depending on the `num_announcements` attribute of the columns' dataset.
If split adjustments are to be applied, a loader, split-adjusted columns,
and the split-adjusted asof-date must be supplied.
Parameters
----------
estimates : pd.DataFrame
The raw estimates data.
``estimates`` must contain at least 5 columns:
sid : int64
The asset id associated with each estimate.
event_date : datetime64[ns]
The date on which the event that the estimate is for will/has
                occurred.
timestamp : datetime64[ns]
The date on which we learned about the estimate.
fiscal_quarter : int64
The quarter during which the event has/will occur.
fiscal_year : int64
The year during which the event has/will occur.
name_map : dict[str -> str]
A map of names of BoundColumns that this loader will load to the
names of the corresponding columns in `events`.
"""
def __init__(self,
estimates,
name_map):
validate_column_specs(
estimates,
name_map
)
self.estimates = estimates[
estimates[EVENT_DATE_FIELD_NAME].notnull() &
estimates[FISCAL_QUARTER_FIELD_NAME].notnull() &
estimates[FISCAL_YEAR_FIELD_NAME].notnull()
]
self.estimates[NORMALIZED_QUARTERS] = normalize_quarters(
self.estimates[FISCAL_YEAR_FIELD_NAME],
self.estimates[FISCAL_QUARTER_FIELD_NAME],
)
self.array_overwrites_dict = {
datetime64ns_dtype: Datetime641DArrayOverwrite,
float64_dtype: Float641DArrayOverwrite,
}
self.scalar_overwrites_dict = {
datetime64ns_dtype: Datetime64Overwrite,
float64_dtype: Float64Overwrite,
}
self.name_map = name_map
@abstractmethod
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
raise NotImplementedError('get_zeroth_quarter_idx')
@abstractmethod
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
raise NotImplementedError('get_shifted_qtrs')
@abstractmethod
def create_overwrite_for_estimate(self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments,
split_adjusted_asof_idx):
raise NotImplementedError('create_overwrite_for_estimate')
@abstractproperty
def searchsorted_side(self):
return NotImplementedError('searchsorted_side')
def get_requested_quarter_data(self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
            The number of announcements out the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
"""
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
split_normalized_quarters(
requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
)
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments for and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
"""
split_adjusted_asof_idx = dates.searchsorted(
self._split_adjusted_asof
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
elif self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx
def collect_overwrites_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
"""
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
group[EVENT_DATE_FIELD_NAME].values,
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(
NORMALIZED_QUARTERS
).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
# Find the quarter being requested in the quarter we're
# crossing into.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid,
].iloc[idx]
# Only add adjustments if the next quarter starts somewhere
# in our date index for this sid. Our 'next' quarter can
# never start at index 0; a starting index of 0 means that
# the next quarter's event date was NaT.
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns
)
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs):
"""
Parameters
----------
group : pd.DataFrame
The data for the given sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_to_idx : dict[int -> int]
            A dictionary mapping sid to the sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index. This dictionary is for adjustments for ALL sids. It is
modified as adjustments are collected.
kwargs :
Additional arguments used in collecting adjustments; unused here.
"""
# Collect all adjustments for a given sid.
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def merge_into_adjustments_for_all_sids(self,
all_adjustments_for_sid,
col_to_all_adjustments):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids.
"""
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments,
adjs,
col_name,
ts)
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied.
"""
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs
)
return col_to_all_adjustments
def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
def overwrite_with_null(self,
column,
next_qtr_start_idx,
sid_idx):
return self.scalar_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
column.missing_value
)
def load_adjusted_array(self, columns, dates, assets, mask):
# Separate out getting the columns' datasets and the datasets'
# num_announcements attributes to ensure that we're catching the right
# AttributeError.
col_to_datasets = {col: col.dataset for col in columns}
try:
groups = groupby(lambda col:
col_to_datasets[col].num_announcements,
col_to_datasets)
except AttributeError:
raise AttributeError("Datasets loaded via the "
"EarningsEstimatesLoader must define a "
"`num_announcements` attribute that defines "
"how many quarters out the loader should load"
" the data relative to `dates`.")
if any(num_qtr < 0 for num_qtr in groups):
raise ValueError(
INVALID_NUM_QTRS_MESSAGE % ','.join(
str(qtr) for qtr in groups if qtr < 0
)
)
out = {}
# To optimize performance, only work below on assets that are
# actually in the raw data.
assets_with_data = set(assets) & set(self.estimates[SID_FIELD_NAME])
last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr(
assets_with_data,
columns,
dates
)
# Determine which quarter is immediately next/previous for each
# date.
zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr)
zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx]
for num_announcements, columns in groups.items():
requested_qtr_data = self.get_requested_quarter_data(
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates,
)
# Calculate all adjustments for the given quarter and accumulate
# them for each column.
col_to_adjustments = self.get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns
)
# Lookup the asset indexer once, this is so we can reindex
# the assets returned into the assets requested for each column.
# This depends on the fact that our column multiindex has the same
# sids for each field. This allows us to do the lookup once on
# level 1 instead of doing the lookup each time per value in
# level 0.
asset_indexer = assets.get_indexer_for(
requested_qtr_data.columns.levels[1],
)
for col in columns:
column_name = self.name_map[col.name]
# allocate the empty output with the correct missing value
output_array = np.full(
(len(dates), len(assets)),
col.missing_value,
dtype=col.dtype,
)
# overwrite the missing value with values from the computed
# data
output_array[
:,
asset_indexer,
] = requested_qtr_data[column_name].values
out[col] = AdjustedArray(
output_array,
# There may not be any adjustments at all (e.g. if
# len(date) == 1), so provide a default.
dict(col_to_adjustments.get(column_name, {})),
col.missing_value,
)
return out
def get_last_data_per_qtr(self, assets_with_data, columns, dates):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
dates : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
Returns
-------
        last_per_qtr : pd.DataFrame
            A DataFrame with columns that are a MultiIndex of [
            self.estimates.columns, normalized_quarters, sid].
        stacked_last_per_qtr : pd.DataFrame
            A DataFrame indexed by [dates, sid, normalized_quarters] that has
            the latest information for each row of the index, sorted by event
            date.
"""
# Get a DataFrame indexed by date with a MultiIndex of columns of [
# self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
dates,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(
EVENT_DATE_FIELD_NAME,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
)
return last_per_qtr, stacked_last_per_qtr
class NextEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = 'right'
def create_overwrite_for_estimate(self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None):
return [self.array_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
last_per_qtr[
column_name,
requested_quarter,
sid,
].values[:next_qtr_start_idx],
)]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
return zero_qtrs + (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or after each simulation date and
determines the next quarter by picking out the upcoming release for
each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
next_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next event.
"""
next_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] >=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(0)
return next_releases_per_date.index
class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = 'left'
def create_overwrite_for_estimate(self,
column,
column_name,
dates,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None,
split_dict=None):
return [self.overwrite_with_null(
column,
next_qtr_start_idx,
sid_idx,
)]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
return zero_qtrs - (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or after each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
"""
previous_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(-1)
return previous_releases_per_date.index
def validate_split_adjusted_column_specs(name_map, columns):
to_be_split = set(columns)
available = set(name_map.keys())
extra = to_be_split - available
if extra:
raise ValueError(
"EarningsEstimatesLoader got the following extra columns to be "
"split-adjusted: {extra}.\n"
"Got Columns: {to_be_split}\n"
"Available Columns: {available}".format(
extra=sorted(extra),
to_be_split=sorted(to_be_split),
available=sorted(available),
)
)
class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader):
"""
Estimates loader that loads data that needs to be split-adjusted.
Parameters
----------
split_adjustments_loader : SQLiteAdjustmentReader
The loader to use for reading split adjustments.
split_adjusted_column_names : iterable of str
The column names that should be split-adjusted.
split_adjusted_asof : pd.Timestamp
The date that separates data into 2 halves: the first half is the set
of dates up to and including the split_adjusted_asof date. All
adjustments occurring during this first half are applied to all
dates in this first half. The second half is the set of dates after
the split_adjusted_asof date. All adjustments occurring during this
second half are applied sequentially as they appear in the timeline.
"""
def __init__(self,
estimates,
name_map,
split_adjustments_loader,
split_adjusted_column_names,
split_adjusted_asof):
validate_split_adjusted_column_specs(name_map,
split_adjusted_column_names)
self._split_adjustments = split_adjustments_loader
self._split_adjusted_column_names = split_adjusted_column_names
self._split_adjusted_asof = split_adjusted_asof
self._split_adjustment_dict = {}
super(SplitAdjustedEstimatesLoader, self).__init__(
estimates,
name_map
)
@abstractmethod
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
raise NotImplementedError('collect_split_adjustments')
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
"""
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
(pre_adjustments,
post_adjustments) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[
self.estimates[SID_FIELD_NAME] == sid
]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Calculates both split adjustments and overwrites for all sids.
"""
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
        # Compute the index in `dates` as of which the data was split-adjusted;
        # the per-sid splits themselves are added in `get_adjustments_for_sid`.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(
dates
)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx
)
def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
            index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments
def collect_post_asof_split_adjustments(self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
"""
col_to_split_adjustments = {}
if post_adjustments:
# Get an integer index
requested_qtr_timeline = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS
][sid].reset_index()
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
# Split the data into range by quarter and determine which quarter
# was being requested in each range.
# Split integer indexes up by quarter range
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1
)
requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]]
for r in qtr_ranges_idxs]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(
*post_adjustments
):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(
col_to_split_adjustments,
[adj],
column_name,
start_idx
)
return col_to_split_adjustments
def retrieve_split_adjustment_data_for_sid(self,
dates,
sid,
split_adjusted_asof_idx):
"""
dates : pd.DatetimeIndex
The calendar dates.
sid : int
The sid for which we want to retrieve adjustments.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
Returns
-------
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
"""
adjustments = self._split_adjustments.get_adjustments_for_sid(
'splits', sid
)
        # Sort by adjustment date (a bare ``sorted`` call would discard its result).
        adjustments = sorted(adjustments, key=lambda adj: adj[0])
# Get rid of any adjustments that happen outside of our date index.
adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1],
adjustments))
adjustment_values = np.array([adj[1] for adj in adjustments])
timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments])
# We need the first date on which we would have known about each
# adjustment.
date_indexes = dates.searchsorted(timestamps)
pre_adjustment_idxs = np.where(
date_indexes <= split_adjusted_asof_idx
)[0]
last_adjustment_split_asof_idx = -1
if len(pre_adjustment_idxs):
last_adjustment_split_asof_idx = pre_adjustment_idxs.max()
pre_adjustments = (
adjustment_values[:last_adjustment_split_asof_idx + 1],
date_indexes[:last_adjustment_split_asof_idx + 1]
)
post_adjustments = (
adjustment_values[last_adjustment_split_asof_idx + 1:],
date_indexes[last_adjustment_split_asof_idx + 1:],
timestamps[last_adjustment_split_asof_idx + 1:]
)
return pre_adjustments, post_adjustments
def _collect_adjustments(self,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments(
split_adjusted_asof_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
)
post_adjustments_dict = self.collect_post_asof_split_adjustments(
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns
)
return pre_adjustments_dict, post_adjustments_dict
def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
)
class PreviousSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader
):
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
)
class NextSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader
):
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx \
and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
)
|
apache-2.0
|
danielemichilli/LSPs
|
Create_diagnostics.py
|
1
|
2272
|
import argparse
import os
import shutil
import pandas as pd
import src.Paths as PATH
from LSpS import set_paths
from src import LSPplot
from src import Internet
from glob import glob
def parser():
'''
Command-line options
'''
  parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,description="The program creates diagnostic plots and optionally uploads the candidates of the observation.")
parser.add_argument('id_obs', help='Observation ID')
  parser.add_argument('-plot', help="Produce the diagnostic plots.",action='store_true')
  parser.add_argument('-store_online', help="Upload the candidates online.",action='store_true')
  parser.add_argument('-conf', help="Run over confirmation observations.",action='store_true')
args = parser.parse_args()
return args
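# Typical invocation (the observation ID is a placeholder):
#   python Create_diagnostics.py <id_obs> -plot -store_online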
def load_DB():
meta_data = pd.read_hdf(PATH.DB, 'meta_data')
pulses = pd.read_hdf(PATH.DB, 'pulses')
cands = pd.read_hdf(PATH.DB, 'candidates')
cands = cands[cands.main_cand == 0]
cands.sort_values('Sigma', inplace=True, ascending=False)
cands = cands.groupby('BEAM').head(10)
cands = cands.head(50)
cands = cands[ ((cands.N_pulses == 1) & (cands.Sigma>10.)) | ((cands.N_pulses > 1) & (cands.Sigma>16.)) ]
cands.sort_values('Sigma', inplace=True, ascending=False)
return meta_data, pulses, cands
def main(PATH):
args = parser()
PATH = set_paths(args, PATH)
PATH.DB = os.path.join(PATH.OBS_FOLDER,'sp/SinglePulses.hdf5')
if os.path.isdir(os.path.join(PATH.OBS_FOLDER, 'sp/candidates')): shutil.rmtree(os.path.join(PATH.OBS_FOLDER, 'sp/candidates'))
if args.conf: inc = 0
else: inc = 12
meta_data, pulses, cands = load_DB()
try:
if args.plot: LSPplot.output(args.id_obs, pulses, meta_data, cands, PATH.DB, inc=inc)
for diag_plot in glob(os.path.join(PATH.WRK_FOLDER, 'sp/candidates/*.pdf')): shutil.copy(diag_plot, PATH.DIAG_PLOT_FOLDER)
if args.store_online: Internet.upload(cands, args.id_obs, os.path.join(PATH.WRK_FOLDER,'sp/candidates/.'), meta_data, pulses)
finally:
shutil.copytree(os.path.join(PATH.WRK_FOLDER, 'sp/candidates'), os.path.join(PATH.OBS_FOLDER, 'sp/candidates'))
shutil.rmtree(PATH.WRK_FOLDER)
shutil.rmtree(PATH.TMP_FOLDER)
return
if __name__ == '__main__':
main(PATH)
|
mit
|
Averroes/statsmodels
|
statsmodels/stats/contrast.py
|
25
|
13612
|
from statsmodels.compat.python import range
import numpy as np
from scipy.stats import f as fdist
from scipy.stats import t as student_t
from scipy import stats
from statsmodels.tools.tools import clean0, fullrank
from statsmodels.compat.numpy import np_matrix_rank
#TODO: should this be public if it's just a container?
class ContrastResults(object):
"""
Class for results of tests of linear restrictions on coefficients in a model.
This class functions mainly as a container for `t_test`, `f_test` and
`wald_test` for the parameters of a model.
The attributes depend on the statistical test and are either based on the
normal, the t, the F or the chisquare distribution.
"""
def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,
df_num=None, alpha=0.05, **kwds):
self.effect = effect # Let it be None for F
if F is not None:
self.distribution = 'F'
self.fvalue = F
self.statistic = self.fvalue
self.df_denom = df_denom
self.df_num = df_num
self.dist = fdist
self.dist_args = (df_num, df_denom)
self.pvalue = fdist.sf(F, df_num, df_denom)
elif t is not None:
self.distribution = 't'
self.tvalue = t
self.statistic = t # generic alias
self.sd = sd
self.df_denom = df_denom
self.dist = student_t
self.dist_args = (df_denom,)
self.pvalue = self.dist.sf(np.abs(t), df_denom) * 2
elif 'statistic' in kwds:
# TODO: currently targeted to normal distribution, and chi2
self.distribution = kwds['distribution']
self.statistic = kwds['statistic']
self.tvalue = value = kwds['statistic'] # keep alias
# TODO: for results instance we decided to use tvalues also for normal
self.sd = sd
self.dist = getattr(stats, self.distribution)
self.dist_args = ()
            if self.distribution == 'chi2':
                self.pvalue = self.dist.sf(self.statistic, df_denom)
            else:
                # normal distribution
                self.pvalue = self.dist.sf(np.abs(value)) * 2
# cleanup
# should we return python scalar?
self.pvalue = np.squeeze(self.pvalue)
def conf_int(self, alpha=0.05):
"""
Returns the confidence interval of the value, `effect` of the constraint.
This is currently only available for t and z tests.
Parameters
----------
alpha : float, optional
The significance level for the confidence interval.
            i.e., the default `alpha` = .05 returns a 95% confidence interval.
Returns
-------
ci : ndarray, (k_constraints, 2)
The array has the lower and the upper limit of the confidence
interval in the columns.
"""
if self.effect is not None:
# confidence intervals
q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
lower = self.effect - q * self.sd
upper = self.effect + q * self.sd
return np.column_stack((lower, upper))
else:
raise NotImplementedError('Confidence Interval not available')
def __array__(self):
if hasattr(self, "fvalue"):
return self.fvalue
else:
return self.tvalue
def __str__(self):
return self.summary().__str__()
def __repr__(self):
return str(self.__class__) + '\n' + self.__str__()
def summary(self, xname=None, alpha=0.05, title=None):
"""Summarize the Results of the hypothesis test
Parameters
        ----------
        xname : list of strings, optional
            Names for the constraints. Default is `c##` where ## is the index
            of the constraint, one name per constraint.
alpha : float
significance level for the confidence intervals. Default is
alpha = 0.05 which implies a confidence level of 95%.
title : string, optional
Title for the params table. If not None, then this replaces the
default title
Returns
-------
smry : string or Summary instance
This contains a parameter results table in the case of t or z test
in the same form as the parameter results table in the model
results summary.
For F or Wald test, the return is a string.
"""
if self.effect is not None:
# TODO: should also add some extra information, e.g. robust cov ?
# TODO: can we infer names for constraints, xname in __init__ ?
if title is None:
title = 'Test for Constraints'
elif title == '':
# don't add any title,
# I think SimpleTable skips on None - check
title = None
# we have everything for a params table
use_t = (self.distribution == 't')
yname='constraints' # Not used in params_frame
if xname is None:
xname = ['c%d'%ii for ii in range(len(self.effect))]
from statsmodels.iolib.summary import summary_params
pvalues = np.atleast_1d(self.pvalue)
summ = summary_params((self, self.effect, self.sd, self.statistic,
pvalues, self.conf_int(alpha)),
yname=yname, xname=xname, use_t=use_t,
title=title)
return summ
elif hasattr(self, 'fvalue'):
            # TODO: create something nicer for these cases
return '<F test: F=%s, p=%s, df_denom=%d, df_num=%d>' % \
(repr(self.fvalue), self.pvalue, self.df_denom, self.df_num)
else:
# generic
return '<Wald test: statistic=%s, p-value=%s>' % \
(self.statistic, self.pvalue)
def summary_frame(self, xname=None, alpha=0.05):
"""Return the parameter table as a pandas DataFrame
This is only available for t and normal tests
"""
if self.effect is not None:
# we have everything for a params table
use_t = (self.distribution == 't')
yname='constraints' # Not used in params_frame
if xname is None:
xname = ['c%d'%ii for ii in range(len(self.effect))]
from statsmodels.iolib.summary import summary_params_frame
summ = summary_params_frame((self, self.effect, self.sd,
self.statistic,self.pvalue,
self.conf_int(alpha)), yname=yname,
xname=xname, use_t=use_t)
return summ
else:
# TODO: create something nicer
raise NotImplementedError('only available for t and z')
class Contrast(object):
"""
This class is used to construct contrast matrices in regression models.
They are specified by a (term, design) pair. The term, T, is a linear
combination of columns of the design matrix. The matrix attribute of
Contrast is a contrast matrix C so that
colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))
where pinv(D) is the generalized inverse of D. Further, the matrix
Tnew = dot(C, D)
is full rank. The rank attribute is the rank of
dot(D, dot(pinv(D), T))
In a regression model, the contrast tests that E(dot(Tnew, Y)) = 0
for each column of Tnew.
Parameters
----------
term : array-like
design : array-like
Attributes
----------
contrast_matrix
Examples
--------
>>> import numpy.random as R
>>> import statsmodels.api as sm
>>> import numpy as np
>>> R.seed(54321)
>>> X = R.standard_normal((40,10))
Get a contrast
>>> new_term = np.column_stack((X[:,0], X[:,2]))
>>> c = sm.contrast.Contrast(new_term, X)
>>> test = [[1] + [0]*9, [0]*2 + [1] + [0]*7]
>>> np.allclose(c.contrast_matrix, test)
True
Get another contrast
>>> P = np.dot(X, np.linalg.pinv(X))
>>> resid = np.identity(40) - P
>>> noise = np.dot(resid,R.standard_normal((40,5)))
>>> new_term2 = np.column_stack((noise,X[:,2]))
>>> c2 = Contrast(new_term2, X)
>>> print(c2.contrast_matrix)
[ -1.26424750e-16 8.59467391e-17 1.56384718e-01 -2.60875560e-17
-7.77260726e-17 -8.41929574e-18 -7.36359622e-17 -1.39760860e-16
1.82976904e-16 -3.75277947e-18]
Get another contrast
>>> zero = np.zeros((40,))
>>> new_term3 = np.column_stack((zero,X[:,2]))
>>> c3 = sm.contrast.Contrast(new_term3, X)
>>> test2 = [0]*2 + [1] + [0]*7
>>> np.allclose(c3.contrast_matrix, test2)
True
"""
def _get_matrix(self):
"""
Gets the contrast_matrix property
"""
if not hasattr(self, "_contrast_matrix"):
self.compute_matrix()
return self._contrast_matrix
contrast_matrix = property(_get_matrix)
def __init__(self, term, design):
self.term = np.asarray(term)
self.design = np.asarray(design)
def compute_matrix(self):
"""
Construct a contrast matrix C so that
colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))
where pinv(D) is the generalized inverse of D=design.
"""
T = self.term
if T.ndim == 1:
T = T[:,None]
self.T = clean0(T)
self.D = self.design
self._contrast_matrix = contrastfromcols(self.T, self.D)
try:
            # shape[1] is only defined when the contrast matrix is 2-D
            self.rank = self._contrast_matrix.shape[1]
        except (AttributeError, IndexError):
self.rank = 1
#TODO: fix docstring after usage is settled
def contrastfromcols(L, D, pseudo=None):
"""
From an n x p design matrix D and a matrix L, tries
to determine a p x q contrast matrix C which
determines a contrast of full rank, i.e. the
    n x q matrix
    dot(D, transpose(C))
is full rank.
L must satisfy either L.shape[0] == n or L.shape[1] == p.
If L.shape[0] == n, then L is thought of as representing
columns in the column space of D.
If L.shape[1] == p, then L is thought of as what is known
as a contrast matrix. In this case, this function returns an estimable
contrast corresponding to the dot(D, L.T)
Note that this always produces a meaningful contrast, not always
with the intended properties because q is always non-zero unless
L is identically 0. That is, it produces a contrast that spans
the column space of L (after projection onto the column space of D).
Parameters
----------
L : array-like
D : array-like
"""
L = np.asarray(L)
D = np.asarray(D)
n, p = D.shape
if L.shape[0] != n and L.shape[1] != p:
raise ValueError("shape of L and D mismatched")
if pseudo is None:
        pseudo = np.linalg.pinv(D)  # D^+ = dot(inv(dot(D.T, D)), D.T) when D has full column rank
if L.shape[0] == n:
C = np.dot(pseudo, L).T
else:
C = L
C = np.dot(pseudo, np.dot(D, C.T)).T
Lp = np.dot(D, C.T)
if len(Lp.shape) == 1:
Lp.shape = (n, 1)
if np_matrix_rank(Lp) != Lp.shape[1]:
Lp = fullrank(Lp)
C = np.dot(pseudo, Lp).T
return np.squeeze(C)
# TODO: this is currently a minimal version, stub
class WaldTestResults(object):
# for F and chi2 tests of joint hypothesis, mainly for vectorized
def __init__(self, statistic, distribution, dist_args, table=None,
pvalues=None):
self.table = table
self.distribution = distribution
self.statistic = statistic
#self.sd = sd
self.dist_args = dist_args
# The following is because I don't know which we want
if table is not None:
self.statistic = table['statistic'].values
self.pvalues = table['pvalue'].values
self.df_constraints = table['df_constraint'].values
if self.distribution == 'F':
self.df_denom = table['df_denom'].values
else:
            if self.distribution == 'chi2':
                self.dist = stats.chi2
                self.df_constraints = self.dist_args[0] # assumes tuple
                # using dist_args[0] is a bit dangerous,
            elif self.distribution == 'F':
                self.dist = stats.f
                self.df_constraints, self.df_denom = self.dist_args
            else:
                raise ValueError('only F and chi2 distributions are possible')
if pvalues is None:
self.pvalues = self.dist.sf(np.abs(statistic), *dist_args)
else:
self.pvalues = pvalues
@property
def col_names(self):
"""column names for summary table
"""
pr_test = "P>%s" % self.distribution
col_names = [self.distribution, pr_test, 'df constraint']
if self.distribution == 'F':
col_names.append('df denom')
return col_names
def summary_frame(self):
# needs to be a method for consistency
if hasattr(self, '_dframe'):
return self._dframe
        # rename the column names, but don't copy data
        renaming = dict(zip(self.table.columns, self.col_names))
        self._dframe = self.table.rename(columns=renaming)
        return self._dframe
def __str__(self):
return self.summary_frame().to_string()
def __repr__(self):
return str(self.__class__) + '\n' + self.__str__()
|
bsd-3-clause
|
jefffohl/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_wx.py
|
69
|
77038
|
from __future__ import division
"""
backend_wx.py
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue ([email protected])
Derived from original copyright work by John Hunter
([email protected])
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
"""
KNOWN BUGS -
- Mousewheel (on Windows) only works after menu button has been pressed
at least once
- Mousewheel on Linux (wxGTK linked against GTK 1.2) does not work at all
- Vertical text renders horizontally if you use a non TrueType font
on Windows. This is a known wxPython issue. Work-around is to ensure
that you use a TrueType font.
- Pcolor demo puts chart slightly outside bounding box (approx 1-2 pixels
to the bottom left)
- Outputting to bitmap more than 300dpi results in some text being incorrectly
scaled. Seems to be a wxPython bug on Windows or font point sizes > 60, as
font size is correctly calculated.
- Performance poorer than for previous direct rendering version
- TIFF output not supported on wxGTK. This is a wxGTK issue
- Text is not anti-aliased on wxGTK. This is probably a platform
configuration issue.
- If a second call is made to show(), no figure is generated (#866965)
Not implemented:
- Printing
Fixed this release:
- Bug #866967: Interactive operation issues fixed [JDH]
- Bug #866969: Dynamic update does not function with backend_wx [JOD]
Examples which work on this release:
---------------------------------------------------------------
| Windows 2000 | Linux |
| wxPython 2.3.3 | wxPython 2.4.2.4 |
--------------------------------------------------------------|
- alignment_test.py | TBE | OK |
- arctest.py | TBE | (3) |
- axes_demo.py | OK | OK |
- axes_props.py | OK | OK |
- bar_stacked.py | TBE | OK |
- barchart_demo.py | OK | OK |
- color_demo.py | OK | OK |
- csd_demo.py | OK | OK |
- dynamic_demo.py | N/A | N/A |
- dynamic_demo_wx.py | TBE | OK |
- embedding_in_gtk.py | N/A | N/A |
- embedding_in_wx.py | OK | OK |
- errorbar_demo.py | OK | OK |
- figtext.py | OK | OK |
- histogram_demo.py | OK | OK |
- interactive.py | N/A (2) | N/A (2) |
- interactive2.py | N/A (2) | N/A (2) |
- legend_demo.py | OK | OK |
- legend_demo2.py | OK | OK |
- line_styles.py | OK | OK |
- log_demo.py | OK | OK |
- logo.py | OK | OK |
- mpl_with_glade.py | N/A (2) | N/A (2) |
- mri_demo.py | OK | OK |
- mri_demo_with_eeg.py | OK | OK |
- multiple_figs_demo.py | OK | OK |
- pcolor_demo.py | OK | OK |
- psd_demo.py | OK | OK |
- scatter_demo.py | OK | OK |
- scatter_demo2.py | OK | OK |
- simple_plot.py | OK | OK |
- stock_demo.py | OK | OK |
- subplot_demo.py | OK | OK |
- system_monitor.py | N/A (2) | N/A (2) |
- text_handles.py | OK | OK |
- text_themes.py | OK | OK |
- vline_demo.py | OK | OK |
---------------------------------------------------------------
 (2) - Script uses GTK-specific features - cannot run,
but wxPython equivalent should be written.
(3) - Clipping seems to be broken.
"""
cvs_id = '$Id: backend_wx.py 6484 2008-12-03 18:38:03Z jdh2358 $'
import sys, os, os.path, math, StringIO, weakref, warnings
import numpy as npy
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback, pdb
_DEBUG_lvls = {1 : 'Low ', 2 : 'Med ', 3 : 'High', 4 : 'Error' }
try:
import wx
backend_version = wx.VERSION_STRING
except:
raise ImportError("Matplotlib backend_wx requires wxPython be installed")
#!!! this is the call that is causing the exception swallowing !!!
#wx.InitAllImageHandlers()
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print
pdb.pm() # jdh uncomment
class fake_stderr:
"""Wx does strange things with stderr, as it makes the assumption that there
is probably no console. This redirects stderr to the console, since we know
that there is one!"""
def write(self, msg):
print "Stderr: %s\n\r" % msg
#if _DEBUG < 5:
# sys.excepthook = debug_on_error
# WxLogger =wx.LogStderr()
# sys.stderr = fake_stderr
# Event binding code changed after version 2.5
if wx.VERSION_STRING >= '2.5':
def bind(actor,event,action,**kw):
actor.Bind(event,action,**kw)
else:
def bind(actor,event,action,id=None):
if id is not None:
event(actor, id, action)
else:
event(actor,action)
import matplotlib
from matplotlib import verbose
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureCanvasBase, FigureManagerBase, NavigationToolbar2, \
cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.artist import Artist
from matplotlib.cbook import exception_to_str, is_string_like, is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.text import _process_text_args, Text
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
# the True dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, popup a error dialog
"""
dialog =wx.MessageDialog(parent = parent,
message = msg,
caption = 'Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
#In wxPython, drawing is performed on a wxDC instance, which will
    #generally be mapped to the client area of the window displaying
#the plot. Under wxPython, the wxDC instance has a wx.Pen which
#describes the colour and weight of any lines drawn, and a wxBrush
#which describes the fill colour of any closed polygon.
fontweights = {
100 : wx.LIGHT,
200 : wx.LIGHT,
300 : wx.LIGHT,
400 : wx.NORMAL,
500 : wx.NORMAL,
600 : wx.NORMAL,
700 : wx.BOLD,
800 : wx.BOLD,
900 : wx.BOLD,
'ultralight' : wx.LIGHT,
'light' : wx.LIGHT,
'normal' : wx.NORMAL,
'medium' : wx.NORMAL,
'semibold' : wx.NORMAL,
'bold' : wx.BOLD,
'heavy' : wx.BOLD,
'ultrabold' : wx.BOLD,
'black' : wx.BOLD
}
fontangles = {
'italic' : wx.ITALIC,
'normal' : wx.NORMAL,
'oblique' : wx.SLANT }
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Is it wise to agree on standard fontnames across all backends?
fontnames = { 'Sans' : wx.SWISS,
'Roman' : wx.ROMAN,
'Script' : wx.SCRIPT,
'Decorative' : wx.DECORATIVE,
'Modern' : wx.MODERN,
'Courier' : wx.MODERN,
'courier' : wx.MODERN }
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
DEBUG_MSG("__init__()", 1, self)
if wx.VERSION_STRING < "2.8":
raise RuntimeError("matplotlib no longer supports wxPython < 2.8 for the Wx backend.\nYou may, however, use the WxAgg backend.")
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
#return 1, 1
if ismath: s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0], self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
#@staticmethod
def convert_path(gfx_ctx, tpath):
wxpath = gfx_ctx.CreatePath()
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
convert_path = staticmethod(convert_path)
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
tpath = transform.transform_path(path)
wxpath = self.convert_path(gfx_ctx, tpath)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
if bbox != None:
l,b,w,h = bbox.bounds
else:
l=0
            b=0
w=self.width
h=self.height
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
bitmap = wx.BitmapFromBufferRGBA(cols,rows,image_array)
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap,int(l),int(b),int(w),int(h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the matplotlib.text.Text instance
"""
if ismath: s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y-h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
gfx_ctx.DrawRotatedText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
"""
Return an instance of a GraphicsContextWx, and sets the current gc copy
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc != None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font =wx.Font(int(size+0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
convert point measures to pixes using dpi and the pixels per
inch of the display
"""
return points*(PIXELS_PER_INCH/72.0*self.dpi/72.0)
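        # For example, with self.dpi == 72 the scale factor is
        # PIXELS_PER_INCH / 72.0, so a 12 pt font maps to roughly
        # 12 * 75 / 72 = 12.5 pixels.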
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). wxPython uses an int interval, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = { 'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND }
_joind = { 'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND }
_dashd_wx = { 'solid': wx.SOLID,
'dashed': wx.SHORT_DASH,
'dashdot': wx.DOT_DASH,
'dotted': wx.DOT }
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
        Select a null bitmap into this wxDC instance
"""
if sys.platform=='win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGB=None):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGB)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if w>0 and w<1: w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw==0: lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
Set the line style to be one of
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
            self._style = wx.LONG_DASH  # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b), alpha=int(a))
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window probably
implements a wx.Sizer to control the displayed control size - but we give a
hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL : 'control',
wx.WXK_SHIFT : 'shift',
wx.WXK_ALT : 'alt',
wx.WXK_LEFT : 'left',
wx.WXK_UP : 'up',
wx.WXK_RIGHT : 'right',
wx.WXK_DOWN : 'down',
wx.WXK_ESCAPE : 'escape',
wx.WXK_F1 : 'f1',
wx.WXK_F2 : 'f2',
wx.WXK_F3 : 'f3',
wx.WXK_F4 : 'f4',
wx.WXK_F5 : 'f5',
wx.WXK_F6 : 'f6',
wx.WXK_F7 : 'f7',
wx.WXK_F8 : 'f8',
wx.WXK_F9 : 'f9',
wx.WXK_F10 : 'f10',
wx.WXK_F11 : 'f11',
wx.WXK_F12 : 'f12',
wx.WXK_SCROLL : 'scroll_lock',
wx.WXK_PAUSE : 'break',
wx.WXK_BACK : 'backspace',
wx.WXK_RETURN : 'enter',
wx.WXK_INSERT : 'insert',
wx.WXK_DELETE : 'delete',
wx.WXK_HOME : 'home',
wx.WXK_END : 'end',
wx.WXK_PRIOR : 'pageup',
wx.WXK_NEXT : 'pagedown',
wx.WXK_PAGEUP : 'pageup',
wx.WXK_PAGEDOWN : 'pagedown',
wx.WXK_NUMPAD0 : '0',
wx.WXK_NUMPAD1 : '1',
wx.WXK_NUMPAD2 : '2',
wx.WXK_NUMPAD3 : '3',
wx.WXK_NUMPAD4 : '4',
wx.WXK_NUMPAD5 : '5',
wx.WXK_NUMPAD6 : '6',
wx.WXK_NUMPAD7 : '7',
wx.WXK_NUMPAD8 : '8',
wx.WXK_NUMPAD9 : '9',
wx.WXK_NUMPAD_ADD : '+',
wx.WXK_NUMPAD_SUBTRACT : '-',
wx.WXK_NUMPAD_MULTIPLY : '*',
wx.WXK_NUMPAD_DIVIDE : '/',
wx.WXK_NUMPAD_DECIMAL : 'dec',
wx.WXK_NUMPAD_ENTER : 'enter',
wx.WXK_NUMPAD_UP : 'up',
wx.WXK_NUMPAD_RIGHT : 'right',
wx.WXK_NUMPAD_DOWN : 'down',
wx.WXK_NUMPAD_LEFT : 'left',
wx.WXK_NUMPAD_PRIOR : 'pageup',
wx.WXK_NUMPAD_NEXT : 'pagedown',
wx.WXK_NUMPAD_PAGEUP : 'pageup',
wx.WXK_NUMPAD_PAGEDOWN : 'pagedown',
wx.WXK_NUMPAD_HOME : 'home',
wx.WXK_NUMPAD_END : 'end',
wx.WXK_NUMPAD_INSERT : 'insert',
wx.WXK_NUMPAD_DELETE : 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l,b,w,h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn('could not find a setinitialsize function for backend_wx; please report your wxpython version=%s to the matplotlib developers list'%backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize', do_nothing)
if not hasattr(self,'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible', lambda *args: True)
# Create the drawing bitmap
self.bitmap =wx.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w,h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
bind(self, wx.EVT_SIZE, self._onSize)
bind(self, wx.EVT_PAINT, self._onPaint)
bind(self, wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
bind(self, wx.EVT_KEY_DOWN, self._onKeyDown)
bind(self, wx.EVT_KEY_UP, self._onKeyUp)
bind(self, wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
bind(self, wx.EVT_RIGHT_DCLICK, self._onRightButtonDown)
bind(self, wx.EVT_RIGHT_UP, self._onRightButtonUp)
bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
bind(self, wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
bind(self, wx.EVT_LEFT_DCLICK, self._onLeftButtonDown)
bind(self, wx.EVT_LEFT_UP, self._onLeftButtonUp)
bind(self, wx.EVT_MOTION, self._onMotion)
bind(self, wx.EVT_LEAVE_WINDOW, self._onLeave)
bind(self, wx.EVT_ENTER_WINDOW, self._onEnter)
bind(self, wx.EVT_IDLE, self._onIdle)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.macros = {} # dict from wx id to seq of macros
self.Printer_Init()
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
wx.TheClipboard.Open()
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
def Printer_Init(self):
"""initialize printer settings using wx methods"""
self.printerData = wx.PrintData()
self.printerData.SetPaperId(wx.PAPER_LETTER)
self.printerData.SetPrintMode(wx.PRINT_MODE_PRINTER)
self.printerPageData= wx.PageSetupDialogData()
self.printerPageData.SetMarginBottomRight((25,25))
self.printerPageData.SetMarginTopLeft((25,25))
self.printerPageData.SetPrintData(self.printerData)
self.printer_width = 5.5
self.printer_margin= 0.5
def Printer_Setup(self, event=None):
"""set up figure for printing. The standard wx Printer
Setup Dialog seems to die easily. Therefore, this setup
simply asks for image width and margin for printing. """
dmsg = """Width of output figure in inches.
    The current aspect ratio will be kept."""
dlg = wx.Dialog(self, -1, 'Page Setup for Printing' , (-1,-1))
df = dlg.GetFont()
df.SetWeight(wx.NORMAL)
df.SetPointSize(11)
dlg.SetFont(df)
x_wid = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_width, size=(70,-1))
x_mrg = wx.TextCtrl(dlg,-1,value="%.2f" % self.printer_margin,size=(70,-1))
sizerAll = wx.BoxSizer(wx.VERTICAL)
sizerAll.Add(wx.StaticText(dlg,-1,dmsg),
0, wx.ALL | wx.EXPAND, 5)
sizer = wx.FlexGridSizer(0,3)
sizerAll.Add(sizer, 0, wx.ALL | wx.EXPAND, 5)
sizer.Add(wx.StaticText(dlg,-1,'Figure Width'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(x_wid,
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'in'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'Margin'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(x_mrg,
1, wx.ALIGN_LEFT|wx.ALL, 2)
sizer.Add(wx.StaticText(dlg,-1,'in'),
1, wx.ALIGN_LEFT|wx.ALL, 2)
btn = wx.Button(dlg,wx.ID_OK, " OK ")
btn.SetDefault()
sizer.Add(btn, 1, wx.ALIGN_LEFT, 5)
btn = wx.Button(dlg,wx.ID_CANCEL, " CANCEL ")
sizer.Add(btn, 1, wx.ALIGN_LEFT, 5)
dlg.SetSizer(sizerAll)
dlg.SetAutoLayout(True)
sizerAll.Fit(dlg)
if dlg.ShowModal() == wx.ID_OK:
try:
self.printer_width = float(x_wid.GetValue())
self.printer_margin = float(x_mrg.GetValue())
except:
pass
if ((self.printer_width + self.printer_margin) > 7.5):
self.printerData.SetOrientation(wx.LANDSCAPE)
else:
self.printerData.SetOrientation(wx.PORTRAIT)
dlg.Destroy()
return
def Printer_Setup2(self, event=None):
"""set up figure for printing. Using the standard wx Printer
Setup Dialog. """
if hasattr(self, 'printerData'):
data = wx.PageSetupDialogData()
data.SetPrintData(self.printerData)
else:
data = wx.PageSetupDialogData()
data.SetMarginTopLeft( (15, 15) )
data.SetMarginBottomRight( (15, 15) )
dlg = wx.PageSetupDialog(self, data)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetPageSetupData()
tl = data.GetMarginTopLeft()
br = data.GetMarginBottomRight()
self.printerData = wx.PrintData(data.GetPrintData())
dlg.Destroy()
def Printer_Preview(self, event=None):
""" generate Print Preview with wx Print mechanism"""
po1 = PrintoutWx(self, width=self.printer_width,
margin=self.printer_margin)
po2 = PrintoutWx(self, width=self.printer_width,
margin=self.printer_margin)
self.preview = wx.PrintPreview(po1,po2,self.printerData)
if not self.preview.Ok(): print "error with preview"
self.preview.SetZoom(50)
frameInst= self
while not isinstance(frameInst, wx.Frame):
frameInst= frameInst.GetParent()
frame = wx.PreviewFrame(self.preview, frameInst, "Preview")
frame.Initialize()
frame.SetPosition(self.GetPosition())
frame.SetSize((850,650))
frame.Centre(wx.BOTH)
frame.Show(True)
self.gui_repaint()
def Printer_Print(self, event=None):
""" Print figure using wx Print mechanism"""
pdd = wx.PrintDialogData()
        # SetPrintData for 2.4 compatibility
pdd.SetPrintData(self.printerData)
pdd.SetToPage(1)
printer = wx.Printer(pdd)
printout = PrintoutWx(self, width=int(self.printer_width),
margin=int(self.printer_margin))
print_ok = printer.Print(self, printout, True)
if wx.VERSION_STRING >= '2.5':
if not print_ok and not printer.GetLastError() == wx.PRINTER_CANCELLED:
wx.MessageBox("""There was a problem printing.
Perhaps your current printer is not set correctly?""",
"Printing", wx.OK)
else:
if not print_ok:
wx.MessageBox("""There was a problem printing.
Perhaps your current printer is not set correctly?""",
"Printing", wx.OK)
printout.Destroy()
self.gui_repaint()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Create a timer for handling draw_idle requests
# If there are events pending when the timer is
# complete, reset the timer and continue. The
# alternative approach, binding to wx.EVT_IDLE,
# doesn't behave as nicely.
if hasattr(self,'_idletimer'):
self._idletimer.Restart(IDLE_DELAY)
else:
self._idletimer = wx.FutureCall(IDLE_DELAY,self._onDrawIdle)
# FutureCall is a backwards-compatible alias;
# CallLater became available in 2.7.1.1.
def _onDrawIdle(self, *args, **kwargs):
if wx.GetApp().Pending():
self._idletimer.Restart(IDLE_DELAY, *args, **kwargs)
else:
del self._idletimer
# GUI event or explicit draw call may already
# have caused the draw to take place
if not self._isDrawn:
self.draw(*args, **kwargs)
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
Call signature::
start_event_loop(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout*1000, oneShot=True)
bind(self, wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wx.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
if hasattr(self,'_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
return wildcards, extensions, filter_index
def gui_repaint(self, drawDC=None):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied device context. If drawDC is None, a ClientDC will be used to
redraw the image.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if drawDC is None:
drawDC=wx.ClientDC(self)
drawDC.BeginDrawing()
drawDC.DrawBitmap(self.bitmap, 0, 0)
drawDC.EndDrawing()
#wx.GetApp().Yield()
else:
pass
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG, *args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF, *args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l,b,width,height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wx.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not self.bitmap.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError('Could not save figure to %s\n' % (filename))
elif is_writable_file_like(filename):
if not self.bitmap.ConvertToImage().SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError('Could not save figure to %s\n' % (filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def get_default_filetype(self):
return 'png'
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
Called when window is redrawn; since we are blitting the entire
image, we can leave this blank to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap =wx.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1: return # Empty figure
dpival = self.figure.dpi
winch = self._width/dpival
hinch = self._height/dpival
self.figure.set_size_inches(winch, hinch)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
def _get_key(self, evt):
keyval = evt.m_keyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval <256:
key = chr(keyval)
else:
key = None
# why is wx upcasing this?
if key is not None: key = key.lower()
return key
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
#print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
if self.HasCapture(): self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self.CaptureMouse()
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
#print 'release button', 1
evt.Skip()
if self.HasCapture(): self.ReleaseMouse()
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
#print "delta,rotation,rate",delta,rotation,rate
step = rate*float(rotation)/delta
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self,'_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent = evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent = evt)
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
Creates a wx.PySimpleApp instance if a wx.App has not been created.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.PySimpleApp()
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
def draw_if_interactive():
"""
    This should be overridden in a windowing environment if drawing
    should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show():
"""
Current implementation assumes that matplotlib is executed in a PyCrust
shell. It appears to be possible to execute wxPython applications from
within a PyCrust without having to ensure that wxPython has been created
in a secondary thread (e.g. SciPy gui_thread).
Unfortunately, gui_thread seems to introduce a number of further
dependencies on SciPy modules, which I do not wish to introduce
into the backend at this point. If there is a need I will look
into this in a later release.
"""
DEBUG_MSG("show()", 3, None)
for figwin in Gcf.get_all_fig_managers():
figwin.frame.Show()
if show._needmain and not matplotlib.is_interactive():
# start the wxPython gui event if there is not already one running
wxapp = wx.GetApp()
if wxapp is not None:
# wxPython 2.4 has no wx.App.IsMainLoopRunning() method
imlr = getattr(wxapp, 'IsMainLoopRunning', lambda: False)
if not imlr():
wxapp.MainLoop()
show._needmain = False
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
return figmgr
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos =wx.Point(20,20)
l,b,w,h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.sizer =wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
# Mac platform (OSX 10.3, MacPython) does not seem to cope with
# having a toolbar in a sizer. This work-around gets the buttons
# back, but at the expense of having the toolbar at the top
self.SetToolBar(self.toolbar)
else:
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version.
# As noted above, doesn't work for Mac.
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.figmgr = FigureManagerWx(self.canvas, num, self)
bind(self, wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbarWx(self.canvas, True)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
#self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
NB: FigureManagerBase is found in _pylab_helpers
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - http://www.lpthe.jussieu.fr/~zeitlin/wxWindows/docs/wxwin_wxframe.html#wxframe
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
            if self.tb is not None: self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def showfig(*args):
frame.Show()
# attach a show method to the figure
self.canvas.figure.show = showfig
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
#if self.tb is not None: self.tb.Destroy()
import wx
#wx.GetApp().ProcessIdle()
wx.WakeUpIdle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU =wx.NewId()
_NTB_AXISMENU_BUTTON =wx.NewId()
_NTB_X_PAN_LEFT =wx.NewId()
_NTB_X_PAN_RIGHT =wx.NewId()
_NTB_X_ZOOMIN =wx.NewId()
_NTB_X_ZOOMOUT =wx.NewId()
_NTB_Y_PAN_UP =wx.NewId()
_NTB_Y_PAN_DOWN =wx.NewId()
_NTB_Y_ZOOMIN =wx.NewId()
_NTB_Y_ZOOMOUT =wx.NewId()
#_NTB_SUBPLOT =wx.NewId()
_NTB_SAVE =wx.NewId()
_NTB_CLOSE =wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'],'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying'%bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu =wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId =wx.NewId()
self._invertId =wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected", False)
self._menu.AppendSeparator()
bind(self, wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
bind(self, wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
bind(self, wx.EVT_MENU, self._handleInvertAxesSelected, id=self._invertId)
def Destroy(self):
self._menu.Destroy()
        # Destroy the underlying button; calling self.Destroy() here would recurse.
        wx.Button.Destroy(self)
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y+h-4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0: return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId =wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i, "Select axis %d" % i, True)
self._menu.Check(menuId, True)
bind(self, wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
self._toolbar.set_active(range(len(self._axisId)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e+1)
# remove trailing ',' and add to button string
self.SetLabel("Axes: %s" % axis_txt[:-1])
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6,3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
_NTB2_HOME =wx.NewId()
self._NTB2_BACK =wx.NewId()
self._NTB2_FORWARD =wx.NewId()
self._NTB2_PAN =wx.NewId()
self._NTB2_ZOOM =wx.NewId()
_NTB2_SAVE = wx.NewId()
_NTB2_SUBPLOT =wx.NewId()
self.SetToolBitmapSize(wx.Size(24,24))
self.AddSimpleTool(_NTB2_HOME, _load_bitmap('home.png'),
'Home', 'Reset original view')
self.AddSimpleTool(self._NTB2_BACK, _load_bitmap('back.png'),
'Back', 'Back navigation view')
self.AddSimpleTool(self._NTB2_FORWARD, _load_bitmap('forward.png'),
'Forward', 'Forward navigation view')
# todo: get new bitmap
self.AddCheckTool(self._NTB2_PAN, _load_bitmap('move.png'),
shortHelp='Pan',
longHelp='Pan with left, zoom with right')
self.AddCheckTool(self._NTB2_ZOOM, _load_bitmap('zoom_to_rect.png'),
shortHelp='Zoom', longHelp='Zoom to rectangle')
self.AddSeparator()
self.AddSimpleTool(_NTB2_SUBPLOT, _load_bitmap('subplots.png'),
'Configure subplots', 'Configure subplot parameters')
self.AddSimpleTool(_NTB2_SAVE, _load_bitmap('filesave.png'),
'Save', 'Save plot contents to file')
bind(self, wx.EVT_TOOL, self.home, id=_NTB2_HOME)
bind(self, wx.EVT_TOOL, self.forward, id=self._NTB2_FORWARD)
bind(self, wx.EVT_TOOL, self.back, id=self._NTB2_BACK)
bind(self, wx.EVT_TOOL, self.zoom, id=self._NTB2_ZOOM)
bind(self, wx.EVT_TOOL, self.pan, id=self._NTB2_PAN)
bind(self, wx.EVT_TOOL, self.configure_subplot, id=_NTB2_SUBPLOT)
bind(self, wx.EVT_TOOL, self.save, id=_NTB2_SAVE)
self.Realize()
def zoom(self, *args):
self.ToggleTool(self._NTB2_PAN, False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self._NTB2_ZOOM, False)
NavigationToolbar2.pan(self, *args)
def configure_subplot(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6,3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save(self, evt):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = "image." + self.canvas.get_default_filetype()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.SAVE|wx.OVERWRITE_PROMPT|wx.CHANGE_DIR)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG('Save file dir:%s name:%s' % (dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format!=ext:
#looks like they forgot to set the image type drop
#down, going with the extension.
warnings.warn('extension %s did not match the selected image type %s; going with %s'%(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception, e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor =wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def release(self, event):
try: del self.lastrect
except AttributeError: pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
canvas = self.canvas
dc =wx.ClientDC(canvas)
# Set logical function to XOR for rubberbanding
dc.SetLogicalFunction(wx.XOR)
# Set dc brush and pen
# Here I set brush and pen to white and grey respectively
# You can set it to your own choices
# The brush setting is not really needed since we
        # don't do any filling of the dc. It is set just for
        # the sake of completeness.
wbrush =wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen =wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1<y0: y0, y1 = y1, y0
        if x1<x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = int(x0), int(y0), int(w), int(h)
try: lastrect = self.lastrect
except AttributeError: pass
else: dc.DrawRectangle(*lastrect) #erase last
self.lastrect = rect
dc.DrawRectangle(*rect)
dc.EndDrawing()
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None: self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self._NTB2_BACK, can_backward)
self.EnableTool(self._NTB2_FORWARD, can_forward)
class NavigationToolbarWx(wx.ToolBar):
def __init__(self, canvas, can_kill=False):
"""
        figure is the Figure instance that the toolbar controls
win, if not None, is the wxWindow the Figure is embedded in
"""
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
DEBUG_MSG("__init__()", 1, self)
self.canvas = canvas
self._lastControl = None
self._mouseOnButton = None
self._parent = canvas.GetParent()
self._NTB_BUTTON_HANDLER = {
_NTB_X_PAN_LEFT : self.panx,
_NTB_X_PAN_RIGHT : self.panx,
_NTB_X_ZOOMIN : self.zoomx,
            _NTB_X_ZOOMOUT : self.zoomx,
_NTB_Y_PAN_UP : self.pany,
_NTB_Y_PAN_DOWN : self.pany,
_NTB_Y_ZOOMIN : self.zoomy,
_NTB_Y_ZOOMOUT : self.zoomy }
self._create_menu()
self._create_controls(can_kill)
self.Realize()
def _create_menu(self):
"""
Creates the 'menu' - implemented as a button which opens a
pop-up menu since wxPython does not allow a menu as a control
"""
DEBUG_MSG("_create_menu()", 1, self)
self._menu = MenuButtonWx(self)
self.AddControl(self._menu)
self.AddSeparator()
def _create_controls(self, can_kill):
"""
Creates the button controls, and links them to event handlers
"""
DEBUG_MSG("_create_controls()", 1, self)
# Need the following line as Windows toolbars default to 15x16
self.SetToolBitmapSize(wx.Size(16,16))
self.AddSimpleTool(_NTB_X_PAN_LEFT, _load_bitmap('stock_left.xpm'),
'Left', 'Scroll left')
self.AddSimpleTool(_NTB_X_PAN_RIGHT, _load_bitmap('stock_right.xpm'),
'Right', 'Scroll right')
self.AddSimpleTool(_NTB_X_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'),
'Zoom in', 'Increase X axis magnification')
self.AddSimpleTool(_NTB_X_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'),
'Zoom out', 'Decrease X axis magnification')
self.AddSeparator()
self.AddSimpleTool(_NTB_Y_PAN_UP,_load_bitmap('stock_up.xpm'),
'Up', 'Scroll up')
self.AddSimpleTool(_NTB_Y_PAN_DOWN, _load_bitmap('stock_down.xpm'),
'Down', 'Scroll down')
self.AddSimpleTool(_NTB_Y_ZOOMIN, _load_bitmap('stock_zoom-in.xpm'),
'Zoom in', 'Increase Y axis magnification')
self.AddSimpleTool(_NTB_Y_ZOOMOUT, _load_bitmap('stock_zoom-out.xpm'),
'Zoom out', 'Decrease Y axis magnification')
self.AddSeparator()
self.AddSimpleTool(_NTB_SAVE, _load_bitmap('stock_save_as.xpm'),
'Save', 'Save plot contents as images')
self.AddSeparator()
bind(self, wx.EVT_TOOL, self._onLeftScroll, id=_NTB_X_PAN_LEFT)
bind(self, wx.EVT_TOOL, self._onRightScroll, id=_NTB_X_PAN_RIGHT)
bind(self, wx.EVT_TOOL, self._onXZoomIn, id=_NTB_X_ZOOMIN)
bind(self, wx.EVT_TOOL, self._onXZoomOut, id=_NTB_X_ZOOMOUT)
bind(self, wx.EVT_TOOL, self._onUpScroll, id=_NTB_Y_PAN_UP)
bind(self, wx.EVT_TOOL, self._onDownScroll, id=_NTB_Y_PAN_DOWN)
bind(self, wx.EVT_TOOL, self._onYZoomIn, id=_NTB_Y_ZOOMIN)
bind(self, wx.EVT_TOOL, self._onYZoomOut, id=_NTB_Y_ZOOMOUT)
bind(self, wx.EVT_TOOL, self._onSave, id=_NTB_SAVE)
bind(self, wx.EVT_TOOL_ENTER, self._onEnterTool, id=self.GetId())
if can_kill:
bind(self, wx.EVT_TOOL, self._onClose, id=_NTB_CLOSE)
bind(self, wx.EVT_MOUSEWHEEL, self._onMouseWheel)
def set_active(self, ind):
"""
ind is a list of index numbers for the axes which are to be made active
"""
DEBUG_MSG("set_active()", 1, self)
self._ind = ind
        if ind is not None:
self._active = [ self._axes[i] for i in self._ind ]
else:
self._active = []
        # Now update button text with active axes
self._menu.updateButtonText(ind)
def get_last_control(self):
"""Returns the identity of the last toolbar button pressed."""
return self._lastControl
def panx(self, direction):
DEBUG_MSG("panx()", 1, self)
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def pany(self, direction):
DEBUG_MSG("pany()", 1, self)
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def zoomx(self, in_out):
DEBUG_MSG("zoomx()", 1, self)
for a in self._active:
a.xaxis.zoom(in_out)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def zoomy(self, in_out):
DEBUG_MSG("zoomy()", 1, self)
for a in self._active:
a.yaxis.zoom(in_out)
self.canvas.draw()
self.canvas.Refresh(eraseBackground=False)
def update(self):
"""
Update the toolbar menu - called when (e.g.) a new subplot or axes are added
"""
DEBUG_MSG("update()", 1, self)
self._axes = self.canvas.figure.get_axes()
self._menu.updateAxes(len(self._axes))
def _do_nothing(self, d):
"""A NULL event handler - does nothing whatsoever"""
pass
# Local event handlers - mainly supply parameters to pan/scroll functions
def _onEnterTool(self, evt):
toolId = evt.GetSelection()
try:
self.button_fn = self._NTB_BUTTON_HANDLER[toolId]
except KeyError:
self.button_fn = self._do_nothing
evt.Skip()
def _onLeftScroll(self, evt):
self.panx(-1)
evt.Skip()
def _onRightScroll(self, evt):
self.panx(1)
evt.Skip()
def _onXZoomIn(self, evt):
self.zoomx(1)
evt.Skip()
def _onXZoomOut(self, evt):
self.zoomx(-1)
evt.Skip()
def _onUpScroll(self, evt):
self.pany(1)
evt.Skip()
def _onDownScroll(self, evt):
self.pany(-1)
evt.Skip()
def _onYZoomIn(self, evt):
self.zoomy(1)
evt.Skip()
def _onYZoomOut(self, evt):
self.zoomy(-1)
evt.Skip()
def _onMouseEnterButton(self, button):
self._mouseOnButton = button
def _onMouseLeaveButton(self, button):
if self._mouseOnButton == button:
self._mouseOnButton = None
def _onMouseWheel(self, evt):
if evt.GetWheelRotation() > 0:
direction = 1
else:
direction = -1
self.button_fn(direction)
_onSave = NavigationToolbar2Wx.save
def _onClose(self, evt):
self.GetParent().Destroy()
class StatusBarWx(wx.StatusBar):
"""
A status bar is added to _FigureFrame to allow measurements and the
previously selected scroll function to be displayed as a user
convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
#self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
#def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5,margin=0.5, title='matplotlib'):
wx.Printout.__init__(self,title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
        # currently only supports a 1-page print
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw,pph) = self.GetPPIPrinter() # printer's pixels per in
(pgw,pgh) = self.GetPageSizePixels() # page size in pixels
(dcw,dch) = dc.GetSize()
(grw,grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth( int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight( int(self.canvas.bitmap.GetHeight()* vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview(): page_scale = float(dcw)/pgw
# get margin in pixels = (margin in in) * (pixels/in)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
        # (grw is the width of the canvas in pixels)
user_scale = (self.width * fig_dpi * page_scale)/float(grw)
dc.SetDeviceOrigin(left_margin,top_margin)
dc.SetUserScale(user_scale,user_scale)
        # this cute little number avoids DrawBitmap API inconsistencies in wx
try:
dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except:
try:
dc.DrawBitmap(self.canvas.bitmap, (0, 0))
except:
pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
#>
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
Toolbar = NavigationToolbarWx
FigureManager = FigureManagerWx
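# A minimal embedding sketch (illustrative only): one typical way to host this
# backend's canvas and toolbar inside an application-owned wx.Frame. It relies
# only on names already available in this module (wx, Figure, FigureCanvasWx,
# NavigationToolbar2Wx); the helper name _example_embed_canvas is hypothetical.
def _example_embed_canvas():
    app = wx.PySimpleApp()
    frame = wx.Frame(None, -1, 'Embedded matplotlib canvas')
    fig = Figure((5, 4), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9])
    canvas = FigureCanvasWx(frame, -1, fig)
    toolbar = NavigationToolbar2Wx(canvas)
    toolbar.Realize()
    sizer = wx.BoxSizer(wx.VERTICAL)
    sizer.Add(canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
    sizer.Add(toolbar, 0, wx.LEFT | wx.EXPAND)
    frame.SetSizer(sizer)
    frame.Fit()
    frame.Show()
    app.MainLoop()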
|
gpl-3.0
|
zycdragonball/tensorflow
|
tensorflow/python/estimator/inputs/queues/feeding_functions.py
|
46
|
15782
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
    current_epoch: Integer, the number of epochs that have been emitted so far.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
  # We might have emitted more data than the requested number of epochs; trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
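# A small worked example (illustrative only; the helper name is hypothetical):
# with an array of length 5, batch size 4 and a traversal starting at index 0,
# the epoch ends at index 4, so the second batch wraps around and increments
# the epoch counter.
def _example_next_batch_indices():
  indices, epoch = _get_integer_indices_for_next_batch(
      batch_indices_start=0, batch_size=4, epoch_end=4, array_length=5,
      current_epoch=0, total_epochs=2)
  # indices == [0, 1, 2, 3], epoch == 0
  indices, epoch = _get_integer_indices_for_next_batch(
      batch_indices_start=4, batch_size=4, epoch_end=4, array_length=5,
      current_epoch=epoch, total_epochs=2)
  # indices == [4, 0, 1, 2], epoch == 1 (index 4 closed the first epoch)
  return indices, epoch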
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
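# A minimal usage sketch (illustrative only; the helper name is hypothetical):
# build an index placeholder plus a value placeholder and let _ArrayFeedFn
# produce successive feed dictionaries over a small numpy array.
def _example_array_feed_fn():
  data = np.arange(10)
  placeholders = [array_ops.placeholder(dtypes.int64),
                  array_ops.placeholder(dtypes.as_dtype(data.dtype))]
  feed_fn = _ArrayFeedFn(placeholders, data, batch_size=4, num_epochs=1)
  # Each call returns {index_placeholder: indices, value_placeholder: rows}.
  return feed_fn()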
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun"
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
      list_dict_size += 1  # count rows, not individual columns
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
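# A minimal usage sketch (illustrative only; the names below are hypothetical):
# feed from a generator of dicts of numpy arrays. Note that the generator
# callable is invoked once at construction time to inspect the first sample.
def _example_generator_feed_fn():
  def gen():
    for i in range(6):
      yield {"x": np.array([i]), "y": np.array([2 * i])}
  placeholders = [array_ops.placeholder(dtypes.int64),
                  array_ops.placeholder(dtypes.int64)]
  feed_fn = _GeneratorFeedFn(placeholders, gen, batch_size=3, num_epochs=1)
  # Each call returns a dict mapping each placeholder (keys sorted as
  # ['x', 'y']) to a stacked batch of values.
  return feed_fn()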
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
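# A minimal usage sketch (illustrative only; the helper name is hypothetical):
# enqueue rows of a numpy array and build a dequeue op for one batch. Actually
# pulling data requires graph-mode execution with the registered queue runners
# started against a Session.
def _example_enqueue_numpy_array():
  data = np.arange(32).reshape(8, 4).astype(np.float32)
  with ops.Graph().as_default() as g:
    queue = _enqueue_data(data, capacity=16, shuffle=False, enqueue_size=4)
    index_batch, row_batch = queue.dequeue_many(4)
  return g, index_batch, row_batch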
|
apache-2.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/linear_model/ridge.py
|
13
|
51357
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
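# A small numerical sanity check (illustrative only; the helper name is
# hypothetical): for a single target the Cholesky route solves
# (X^T X + alpha * I) w = X^T y, so the returned coefficients should satisfy
# that linear system.
def _example_check_cholesky_solution():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.randn(20, 1)
    alpha = np.array([1.0])
    coef = _solve_cholesky(X, y, alpha)  # shape (1, 3)
    lhs = np.dot(X.T, X) + alpha[0] * np.eye(3)
    rhs = np.dot(X.T, y)
    assert np.allclose(np.dot(lhs, coef.T), rhs)
    return coef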
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
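# A quick consistency sketch (illustrative only; the helper name is
# hypothetical): the SVD and Cholesky paths solve the same penalized
# least-squares problem, so on a well-conditioned dense problem their
# coefficients should agree to numerical precision.
def _example_svd_matches_cholesky():
    rng = np.random.RandomState(42)
    X = rng.randn(30, 5)
    y = rng.randn(30, 2)
    alpha = np.array([0.5, 0.5])
    coef_svd = _solve_svd(X, y, alpha)
    coef_chol = _solve_cholesky(X, y, alpha)
    assert np.allclose(coef_svd, coef_chol)
    return coef_svd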
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
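# A minimal usage sketch (comments only, not part of the library): solve a small
# dense problem through the closed-form path. Note that, as stated in the Notes
# above, ridge_regression does not fit an intercept, so the data should already
# be centered.
#   >>> import numpy as np
#   >>> X = np.array([[0.], [1.], [2.], [3.]])
#   >>> y = np.array([-1.5, -0.5, 0.5, 1.5])
#   >>> ridge_regression(X, y, alpha=1.0)   # (X^T X + alpha*I)^-1 X^T y
#   array([ 0.333...])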
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_offset
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers ('cholesky', 'sparse_cg', 'lsqr' and 'sag') support both
dense and sparse data. However, only 'sag' supports sparse input when
`fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See also
--------
RidgeClassifier, RidgeCV, :class:`sklearn.kernel_ridge.KernelRidge`
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y, centered_kernel=True):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
# the following emulates an additional constant regressor
# corresponding to fit_intercept=True
# but this is done only when the features have been centered
if centered_kernel:
K += np.ones_like(K)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
"""Helper function to avoid code duplication between self._errors and
self._values.
Notes
-----
We don't construct matrix G, instead compute action on y & diagonal.
"""
w = 1. / (v + alpha)
constant_column = np.var(Q, 0) < 1.e-12
# detect constant columns
w[constant_column] = 0 # cancel the regularization for the intercept
w[v == 0] = 0
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y, centered_kernel=True):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
if centered_kernel:
X = np.hstack((X, np.ones((X.shape[0], 1))))
# to emulate fit_intercept=True situation, add a column on ones
# Note that by centering, the other columns are orthogonal to that one
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
"""Helper function to avoid code duplication between self._errors_svd
and self._values_svd.
"""
constant_column = np.var(U, 0) < 1.e-12
# detect columns colinear to ones
w = ((v + alpha) ** -1) - (alpha ** -1)
w[constant_column] = - (alpha ** -1)
# cancel the regularization for the intercept
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return G_diag, c
def _errors_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
centered_kernel = not sparse.issparse(X) and self.fit_intercept
v, Q, QT_y = _pre_compute(X, y, centered_kernel)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
if error:
out, c = _errors(alpha, y, v, Q, QT_y)
else:
out, c = _values(alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv,
scoring=self.scoring)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used, else,
:class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'eigen' if n_features > n_samples or when X is a sparse
matrix, otherwise use 'svd'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X.X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`scoring` callable (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
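# Usage sketch (assuming X, y as in the Ridge docstring above): fit with the
# default efficient leave-one-out GCV and inspect the selected penalty.
#   >>> reg = RidgeCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
#   >>> reg.alpha_   # the alpha with the best leave-one-out score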
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`scoring` callable (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
|
mit
|
geojames/Dart_EnvGIS
|
Week5-1_DataImport.py
|
1
|
6099
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
__author__ = 'James T. Dietrich'
__contact__ = '[email protected]'
__copyright__ = '(c) James Dietrich 2016'
__license__ = 'MIT'
__date__ = 'Wed Nov 16 11:33:39 2016'
__version__ = '1.0'
__status__ = "initial release"
__url__ = "https://github.com/geojames/..."
"""
Name: Week5-1_DataImport.py
Compatibility: Python 3.5
Description: Examples of importing data with the csv module, numpy, and pandas
URL: https://github.com/geojames/...
Requires: libraries
Dev ToDo:
AUTHOR: James T. Dietrich
ORGANIZATION: Dartmouth College
Contact: [email protected]
Copyright: (c) James Dietrich 2016
"""
#------------------------------------------------------------------------------
# !!!!!!!!!!!!!!!
#
# Before we begin...
# you must create a standardized place for all of your code and data
# I suggest a new folder in your "documents" folder called Python
# ON PC: C:\users\<your name>\Documents\Python
# ON MAC: /Users/<your name>/Documents/Python
#
# Inside that folder you can start new folders for different labs/projects
# For this exercise, create a folder called: DataIO
#
# !!!!!!!!!!!!!!!
#%% IMPORTING DATA with CSV Library
import os
import csv
from datetime import datetime
# Change the current working directory to our Data folder
# (keep the line that matches your system and comment out the other)
# WINDOWS
os.chdir("C:/Users/James/Documents/GitHub/Dart_EnvGIS/Data/")
# MAC
#os.chdir("/Users/ryanmckeon/GitHub/Dart_EnvGIS/Data/")
# These are programmatic calls that will work when running a script. If you
# are just typing into the console you can use Unix commands directly without
# calling the OS library... ex: cd /Users/ryanmckeon/Documents/python
# create blank lists to hold the data
dates = []
flows = []
# Opens file as read only. csv.reader reads each line in the file and
# automatically creates a LIST of each row in the file
# the indices in the list correspond to the values in between the commas
# in Lees_Ferry_short.csv, the values for each line are date and flow
# 2015-01-01,12800
#
# OK for known inputs with known length
# Literally interprets data as strings, so type conversion is necessary
with open("Lees_Ferry_flow_Short_no_header.csv","r") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
dates.append(row[0]) # dates.append(datetime.strptime(row[0],"%Y-%m-%d"))
flows.append(row[1]) # flows.append(float(row[1]))
# type conversion (inline conversions above)
date_conv = []
flows_conv = []
for val in dates:
date_conv.append(datetime.strptime(val,"%m/%d/%y")) # the format string must match the dates in your file (e.g. "%Y-%m-%d" for 2015-01-01)
for flow in flows:
flows_conv.append(float(flow))
#%% IMPORTING DATA with Numpy loadtxt
# Numpy's loadtxt function expects whitespace-delimited columns (tabs or
# spaces, by default) and skips header lines that start with a # sign
# for the populations.txt sample file:
# #year hare lynx carrot
# 1900 30e3 4e3 48300
# 1901 47.2e3 6.1e3 48200
#
# loadtxt will create a 2-D array of the data in the txt file
# it's fairly limited and will load everything as one data type
# *** i.e. float, by default
# You could separate the columns to individual 1-D arrays and switch
# the datatypes after import
import os
import numpy as np
data = np.loadtxt('populations.txt')
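# for example (a small sketch assuming the 4-column year/hare/lynx/carrot
# layout shown above), split the 2-D array into separate 1-D arrays and
# convert the year column to integers
year, hare, lynx, carrot = data.T
year = year.astype(int)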
#%% IMPORTING DATA with Numpy genfromtxt
# genfromtxt gives more options for importing different data types
# see the documentation for all of the options:
# http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.genfromtxt.html
#
# you can specify a delimiter, data types, and basic names for the data in the
# array
# If you use the names=True option it will create a structured array where we
# can handle different data types
import os
import numpy as np
data2 = np.genfromtxt('populations_w_names.txt',delimiter="\t", names=True)
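# with names=True the result is a structured array, so (assuming the same
# year/hare/lynx/carrot columns as above) you can pull columns out by name
print(data2.dtype.names) # the column names read from the header row
print(data2['hare']) # access a single column by name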
#%% IMPORTING DATA with Pandas
#
# Pandas offers the easiest way to import data from a variety of file formats
# read_table - reads tab delimited files
# read_csv - reads comma delimited files
# read_excel - reads excel files/tables
#
# The outputs will be in a Pandas data frame
# for read_csv Pandas will do its best to guess data types
# There are lots of options for this function:
# http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table
import os
import pandas as pd
# weather data
weather = pd.read_csv("KVTNORWI2_2016-12-1_2017-01-26.csv")
# Trying our Stream Flow data again
flow_data = pd.read_csv("Lees_Ferry_flow_Short.csv")
#%% parsing dates/times with PANDAS
import os
import pandas as pd
weather_date = pd.read_csv("KVTNORWI2_2016-12-1_2017-01-26.csv", parse_dates=["Time"])
flow_data_date = pd.read_csv("Lees_Ferry_flow_Short.csv", parse_dates=["date"])
# or using column indices
weather = pd.read_csv("KVTNORWI2_2016-12-1_2017-01-26.csv", parse_dates=[0])
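# quick sanity check: after parsing, the date columns should show up as
# datetime64 dtypes instead of plain strings/objects
print(flow_data_date.dtypes)
print(weather_date['Time'].head())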
#%% Cheaters file paths
# Not working well on a Mac -- Ryan
# the library Tkinter is a GUI (graphical user interface) toolkit that can be
# used to build entire GUI applications. But there is a quick and easy file
# picker that we can use to choose a file easily, just like you're used to in
# other applications. The window simply pops up like a standard Open File dialog.
import os
import pandas as pd
import tkinter as tk
from tkinter import filedialog as fd
# start tkinter for file choosing
app = tk.Tk()
# use the file dialog call to get a file name
# Options:
# title - text at the top of the window
# file types - filter for specific file types
# initialdir - the starting directory
target_file = fd.askopenfilename(title='Open a file',
filetypes=[('Comma-Delimited Files (*.csv)',
'*.csv')],initialdir=os.getcwd())
print(target_file)
data = pd.read_csv(target_file)
# clean up the tkinter app
app.destroy()
|
mit
|
bgroveben/python3_machine_learning_projects
|
oreilly_GANs_for_beginners/introduction_to_ml_with_python/mglearn/mglearn/plot_pca.py
|
6
|
4754
|
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
from sklearn.externals.joblib import Memory
memory = Memory(cachedir="cache")
def plot_pca_illustration():
rnd = np.random.RandomState(5)
X_ = rnd.normal(size=(300, 2))
X_blob = np.dot(X_, rnd.normal(size=(2, 2))) + rnd.normal(size=2)
pca = PCA()
pca.fit(X_blob)
X_pca = pca.transform(X_blob)
S = X_pca.std(axis=0)
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
axes = axes.ravel()
axes[0].set_title("Original data")
axes[0].scatter(X_blob[:, 0], X_blob[:, 1], c=X_pca[:, 0], linewidths=0,
s=60, cmap='viridis')
axes[0].set_xlabel("feature 1")
axes[0].set_ylabel("feature 2")
axes[0].arrow(pca.mean_[0], pca.mean_[1], S[0] * pca.components_[0, 0],
S[0] * pca.components_[0, 1], width=.1, head_width=.3,
color='k')
axes[0].arrow(pca.mean_[0], pca.mean_[1], S[1] * pca.components_[1, 0],
S[1] * pca.components_[1, 1], width=.1, head_width=.3,
color='k')
axes[0].text(-1.5, -.5, "Component 2", size=14)
axes[0].text(-4, -4, "Component 1", size=14)
axes[0].set_aspect('equal')
axes[1].set_title("Transformed data")
axes[1].scatter(X_pca[:, 0], X_pca[:, 1], c=X_pca[:, 0], linewidths=0,
s=60, cmap='viridis')
axes[1].set_xlabel("First principal component")
axes[1].set_ylabel("Second principal component")
axes[1].set_aspect('equal')
axes[1].set_ylim(-8, 8)
pca = PCA(n_components=1)
pca.fit(X_blob)
X_inverse = pca.inverse_transform(pca.transform(X_blob))
axes[2].set_title("Transformed data w/ second component dropped")
axes[2].scatter(X_pca[:, 0], np.zeros(X_pca.shape[0]), c=X_pca[:, 0],
linewidths=0, s=60, cmap='viridis')
axes[2].set_xlabel("First principal component")
axes[2].set_aspect('equal')
axes[2].set_ylim(-8, 8)
axes[3].set_title("Back-rotation using only first component")
axes[3].scatter(X_inverse[:, 0], X_inverse[:, 1], c=X_pca[:, 0],
linewidths=0, s=60, cmap='viridis')
axes[3].set_xlabel("feature 1")
axes[3].set_ylabel("feature 2")
axes[3].set_aspect('equal')
axes[3].set_xlim(-8, 4)
axes[3].set_ylim(-8, 4)
def plot_pca_whitening():
rnd = np.random.RandomState(5)
X_ = rnd.normal(size=(300, 2))
X_blob = np.dot(X_, rnd.normal(size=(2, 2))) + rnd.normal(size=2)
pca = PCA(whiten=True)
pca.fit(X_blob)
X_pca = pca.transform(X_blob)
fig, axes = plt.subplots(1, 2, figsize=(10, 10))
axes = axes.ravel()
axes[0].set_title("Original data")
axes[0].scatter(X_blob[:, 0], X_blob[:, 1], c=X_pca[:, 0], linewidths=0, s=60, cmap='viridis')
axes[0].set_xlabel("feature 1")
axes[0].set_ylabel("feature 2")
axes[0].set_aspect('equal')
axes[1].set_title("Whitened data")
axes[1].scatter(X_pca[:, 0], X_pca[:, 1], c=X_pca[:, 0], linewidths=0, s=60, cmap='viridis')
axes[1].set_xlabel("First principal component")
axes[1].set_ylabel("Second principal component")
axes[1].set_aspect('equal')
axes[1].set_xlim(-3, 4)
@memory.cache
def pca_faces(X_train, X_test):
# copy and pasted from nmf. refactor?
# Build PCA models with 10, 50, 100, 500 and 2000 components
# this list will hold the back-transformed test-data
reduced_images = []
for n_components in [10, 50, 100, 500, 2000]:
# build the PCA model
pca = PCA(n_components=n_components)
pca.fit(X_train)
# transform the test data (afterwards has n_components many dimensions)
X_test_pca = pca.transform(X_test)
# back-transform the transformed test-data
# (afterwards it's in the original space again)
X_test_back = pca.inverse_transform(X_test_pca)
reduced_images.append(X_test_back)
return reduced_images
def plot_pca_faces(X_train, X_test, image_shape):
reduced_images = pca_faces(X_train, X_test)
# plot the first three images in the test set:
fig, axes = plt.subplots(3, 5, figsize=(15, 12),
subplot_kw={'xticks': (), 'yticks': ()})
for i, ax in enumerate(axes):
# plot original image
ax[0].imshow(X_test[i].reshape(image_shape),
vmin=0, vmax=1)
# plot the four back-transformed images
for a, X_test_back in zip(ax[1:], reduced_images):
a.imshow(X_test_back[i].reshape(image_shape), vmin=0, vmax=1)
# label the top row
axes[0, 0].set_title("original image")
for ax, n_components in zip(axes[0, 1:], [10, 50, 100, 500, 2000]):
ax.set_title("%d components" % n_components)
|
mit
|
doublsky/MLProfile
|
util.py
|
1
|
9442
|
"""
Utilities for sklearn BLAS profiling.
"""
import argparse
import pandas as pd
import numpy as np
import os
import re
def gen_bench_list(bench_dir):
all_files = os.listdir(bench_dir)
pattern = re.compile("bench_.*\.py")
bench_list = [x for x in all_files if re.match(pattern, x)]
return bench_list
def gen_blist_file(args):
bench_list = gen_bench_list(args.bench_dir)
with open(args.output, "w") as f:
f.write("\n".join(bench_list)+"\n")
def dict_to_str(input_dict):
return " ".join("{} {}".format(key, val) for key, val in input_dict.items())
def write_config_file(filename, configs):
with open(filename, "w") as f:
for config in configs:
if (config["-ns"] * config["-nf"] < 1e8+1):
f.write(dict_to_str(config)+"\n")
def gen_dataset(RorC, ns, nf):
# imports
from sklearn.datasets.samples_generator import make_regression, make_classification
# get path to project root
MLProf_root = os.environ["MLPROF_ROOT"]
if RorC == "reg":
X, y = make_regression(n_samples=ns,
n_features=nf,
n_informative=nf//2,
noise=0.1)
elif RorC == "clf":
X, y = make_classification(n_samples=ns,
n_features=nf,
n_informative=nf//2,
n_redundant=nf//10,
n_classes=2)
else:
raise ValueError("RorC must be either reg or clf, got " + str(RorC))
X_name = "{}X_ns{}_nf{}".format(RorC, ns, nf)
X_path = os.path.join(MLProf_root, "dataset", X_name)
np.save(X_path, X)
y_name = "{}y_ns{}_nf{}".format(RorC, ns, nf)
y_path = os.path.join(MLProf_root, "dataset", y_name)
np.save(y_path, y)
newX = np.load(X_path+".npy")
newy = np.load(y_path+".npy")
assert np.array_equal(newX, X)
assert np.array_equal(newy, y)
def maybe_create_dataset(config_line):
parser = argparse.ArgumentParser()
parser.add_argument("-ns", type=int)
parser.add_argument("-nf", type=int)
configs, _ = parser.parse_known_args(config_line.split())
MLProf_root = os.environ["MLPROF_ROOT"]
dataset_dir = os.path.join(MLProf_root, "dataset")
# maybe create dataset dir
if not os.path.exists(dataset_dir):
os.mkdir(dataset_dir)
for prefix in ["regX", "regy", "clfX", "clfy"]:
dataset_name = "{}_ns{}_nf{}.npy".format(prefix, configs.ns, configs.nf)
dataset_path = os.path.join(dataset_dir, dataset_name)
if not os.path.exists(dataset_path):
gen_dataset(prefix[0:3], configs.ns, configs.nf)
def get_config_file(benchfile, prof_type):
old_style_config_file = benchfile.replace(".py", ".args")
if os.path.exists(old_style_config_file):
return old_style_config_file
else:
return benchfile.replace(".py", ".{}cfg".format(prof_type))
def get_argfile(benchfile):
return get_config_file(benchfile)
def str2bool(s):
if s == "True":
return True
elif s == "False":
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def get_series_signature(series):
ret = ""
for _, value in series.iteritems():
if pd.isnull(value):
ret += "0"
else:
ret += "1"
return int(ret, 2)
def post_processing(df):
df.drop("workload", axis=1, inplace=True)
for index, row in df.iterrows():
sig = get_series_signature(row)
df.set_value(index, "signature", sig)
def post_proc(args):
df = pd.read_csv(args.input, index_col=False)
post_processing(df)
df.to_csv(args.o, index=False)
def gen_klist(args):
data_df = pd.read_excel(args.input, sheetname="Dict")
data_df["Kernel Name"].to_csv(args.outtxt, header=False, index=False)
with open(args.outhead, "w") as f:
f.write("#define KERNEL_SIZE " + str(data_df.shape[0]) + "\n")
f.write("char* kernel_list[] = {\n")
for _, row in data_df.iterrows():
f.write(' "' + row["Kernel Name"] + '",\n')
f.write(' "dummy"\n}; \n')
def arg2csv(infile):
result = pd.DataFrame()
idx = 0
with open(infile, "r") as argf:
for line in argf:
argslist = line.split()
for key, value in zip(argslist[0::2], argslist[1::2]):
result.set_value(idx, re.sub("^-+", "", key), value)
idx += 1
return result
def arg2csv_intf(args):
result_df = arg2csv(args.argfile)
result_df.to_csv(args.csvfile, index=False)
def find_dep(args):
addr_dict = {}
kernel_set = set()
with open(args.trace, "r") as f:
for line in f:
kernel, rw, addr = line.split()
kernel_set.add(kernel)
if addr in addr_dict:
addr_dict[addr].append((kernel, rw))
else:
addr_dict[addr] = [(kernel, rw)]
dep_dict = {}
for k1 in kernel_set:
for k2 in kernel_set:
dep_dict[k1, k2] = 0
for addr in addr_dict:
writer = ""
for kernel, rw in addr_dict[addr]:
if rw == "W":
writer = kernel
if rw == "R" and writer != "":
dep_dict[writer, kernel] += 1
for k1 in kernel_set:
for k2 in kernel_set:
print(k1, "to", k2, ":", dep_dict[k1, k2])
def parse_trace(tracefile):
owner = {}
comm_matrix = {}
with tracefile as trace:
for line in trace:
kernel, num_calls, rw, addr, size = line.split()
size = int(size)
addr_int = int(addr, 16)
# writer immediately becomes the owner
if rw == "W":
# foreach byte
for i in range(0, size):
effective_addr = hex(addr_int+i)
owner[effective_addr] = (kernel, "W")
# first assume everything goes to memory
if rw == "R":
if ("memory", kernel) in comm_matrix:
comm_matrix["memory", kernel] += size
else:
comm_matrix["memory", kernel] = size
elif rw == "W":
if (kernel, "memory") in comm_matrix:
comm_matrix[kernel, "memory"] += size
else:
comm_matrix[kernel, "memory"] = size
else:
raise Exception("Unknown memory operation in trace, line: " + line)
# now consider comm/locality
if rw == "R":
# foreach byte
for i in range(0, size):
effective_addr = hex(addr_int+i)
if effective_addr in owner:
if owner[effective_addr][1] == "W":
comm_matrix[owner[effective_addr][0], "memory"] -= 1
comm_matrix["memory", kernel] -= 1
if (owner[effective_addr][0], kernel) in comm_matrix:
comm_matrix[owner[effective_addr][0], kernel] += 1
else:
comm_matrix[owner[effective_addr][0], kernel] = 1
owner[effective_addr] = (kernel, "R")
return comm_matrix
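# Worked example (comments only) of the ownership logic above: for a trace like
#   A 1 W 0x10 4
#   B 1 R 0x10 4
# kernel A writes 4 bytes, so comm_matrix["A", "memory"] becomes 4 and A owns
# those addresses. When B then reads them, each byte is reclassified as
# kernel-to-kernel traffic: comm_matrix["A", "memory"] and
# comm_matrix["memory", "B"] both drop back to 0 and comm_matrix["A", "B"]
# ends up at 4.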
if (__name__ == "__main__"):
parser = argparse.ArgumentParser(description="Utilities for BLAS profiling.")
subparsers = parser.add_subparsers(title="available sub-command")
# post_process
parser_pp = subparsers.add_parser("post_proc", help="generate signature for each use case")
parser_pp.add_argument("input", type=str, help="Path to input .csv file")
parser_pp.add_argument("-o", default="data.csv", type=str,
help="Path to output .csv file")
parser_pp.set_defaults(func=post_proc)
# generate kernel list
parser_klist = subparsers.add_parser("klist", help="extract all kernels from input Excel file")
parser_klist.add_argument("input", type=str, help="path to input excel file")
parser_klist.add_argument("--outtxt", default="kernel_list.txt", type=str, help="path to output file")
parser_klist.add_argument("--outhead", default="kernel_list.h", type=str, help="path to output file")
parser_klist.set_defaults(func=gen_klist)
# convert .args file to .csv file
parser_a2c = subparsers.add_parser("arg2csv", help="convert a bench_<app>.args file to .csv format")
parser_a2c.add_argument("argfile", type=str, help="Path to input .args file")
parser_a2c.add_argument("csvfile", type=str, help="Path to output .csv file")
parser_a2c.set_defaults(func=arg2csv_intf)
# find dependency
parser_dep = subparsers.add_parser("depend", help="find dependency among kernels")
parser_dep.add_argument("--trace", default="procatrace.out", help="path to trace file")
parser_dep.set_defaults(func=find_dep)
# generate bench_list file
parser_blist = subparsers.add_parser("blist", help="generate bench_list file")
parser_blist.add_argument("--bench_dir", default="benchmark", help="path to benchmark directory")
parser_blist.add_argument("--output", default="bench_list.txt", help="path to output bench_list file")
parser_blist.set_defaults(func=gen_blist_file)
args = parser.parse_args()
args.func(args)
|
mit
|
chhao91/QGIS
|
python/plugins/processing/algs/qgis/MeanAndStdDevPlot.py
|
19
|
3553
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
MeanAndStdDevPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class MeanAndStdDevPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
MEAN_FIELD = 'MEAN_FIELD'
STDDEV_FIELD = 'STDDEV_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Mean and standard deviation plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT,
ParameterTableField.DATA_TYPE_ANY))
self.addParameter(ParameterTableField(self.MEAN_FIELD,
self.tr('Mean field'), self.INPUT))
self.addParameter(ParameterTableField(self.STDDEV_FIELD,
self.tr('StdDev field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
meanfieldname = self.getParameterValue(self.MEAN_FIELD)
stddevfieldname = self.getParameterValue(self.STDDEV_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, meanfieldname, stddevfieldname)
plt.close()
ind = np.arange(len(values[namefieldname]))
width = 0.8
plt.bar(ind, values[meanfieldname], width, color='r',
yerr=values[stddevfieldname],
error_kw=dict(ecolor='yellow'),
)
plt.xticks(ind, values[namefieldname], rotation=45)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
|
gpl-2.0
|
gef756/statsmodels
|
statsmodels/sandbox/stats/multicomp.py
|
26
|
70641
|
'''
from pystatsmodels mailinglist 20100524
Notes:
- unfinished, unverified, but most parts seem to work in MonteCarlo
- one example taken from lecture notes looks ok
- needs cases with non-monotonic inequality for test to see difference between
one-step, step-up and step-down procedures
- FDR doesn't really look better than Bonferroni in the MC examples that I tried
update:
- now tested against R, stats and multtest,
I have all of their methods for p-value correction
- getting Hommel was impossible until I found reference for pvalue correction
- now, since I have p-values correction, some of the original tests (rej/norej)
implementation is not really needed anymore. I think I keep it for reference.
Test procedure for Hommel in development session log
- I haven't updated other functions and classes in here.
- multtest has some good helper function according to docs
- still need to update references, the real papers
- fdr with estimated true hypothesis still missing
- multiple comparison procedures incomplete or missing
- I will get multiple comparison for now only for independent case, which might
be conservative in correlated case (?).
some References:
Gibbons, Jean Dickinson and Chakraborti Subhabrata, 2003, Nonparametric Statistical
Inference, Fourth Edition, Marcel Dekker
p.363: 10.4 THE KRUSKAL-WALLIS ONE-WAY ANOVA TEST AND MULTIPLE COMPARISONS
p.367: multiple comparison for kruskal formula used in multicomp.kruskal
Sheskin, David J., 2004, Handbook of Parametric and Nonparametric Statistical
Procedures, 3rd ed., Chapman&Hall/CRC
Test 21: The Single-Factor Between-Subjects Analysis of Variance
Test 22: The Kruskal-Wallis One-Way Analysis of Variance by Ranks Test
Zwillinger, Daniel and Stephen Kokoska, 2000, CRC standard probability and
statistics tables and formulae, Chapman&Hall/CRC
14.9 WILCOXON RANKSUM (MANN WHITNEY) TEST
S. Paul Wright, Adjusted P-Values for Simultaneous Inference, Biometrics
Vol. 48, No. 4 (Dec., 1992), pp. 1005-1013, International Biometric Society
Stable URL: http://www.jstor.org/stable/2532694
(p-value correction for Hommel in appendix)
for multicomparison
new book "multiple comparison in R"
Hsu is a good reference but I don't have it.
Author: Josef Pktd and example from H Raja and rewrite from Vincent Davis
TODO
----
* handle exception if empty, shows up only sometimes when running this
- DONE I think
Traceback (most recent call last):
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\multicomp.py", line 711, in <module>
print('sh', multipletests(tpval, alpha=0.05, method='sh')
File "C:\Josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\sandbox\stats\multicomp.py", line 241, in multipletests
rejectmax = np.max(np.nonzero(reject))
File "C:\Programs\Python25\lib\site-packages\numpy\core\fromnumeric.py", line 1765, in amax
return _wrapit(a, 'max', axis, out)
File "C:\Programs\Python25\lib\site-packages\numpy\core\fromnumeric.py", line 37, in _wrapit
result = getattr(asarray(obj),method)(*args, **kwds)
ValueError: zero-size array to ufunc.reduce without identity
* name of function multipletests, rename to something like pvalue_correction?
'''
#import xlrd
#import xlwt
from __future__ import print_function
from statsmodels.compat.python import lzip, range, lrange, zip
import scipy.stats
import numpy
import numpy as np
import math
import copy
from scipy import stats
from statsmodels.iolib.table import SimpleTable
from numpy.testing import assert_almost_equal, assert_equal
#temporary circular import
from statsmodels.stats.multitest import multipletests, _ecdf as ecdf, fdrcorrection as fdrcorrection0, fdrcorrection_twostage
from statsmodels.graphics import utils
qcrit = '''
2 3 4 5 6 7 8 9 10
5 3.64 5.70 4.60 6.98 5.22 7.80 5.67 8.42 6.03 8.91 6.33 9.32 6.58 9.67 6.80 9.97 6.99 10.24
6 3.46 5.24 4.34 6.33 4.90 7.03 5.30 7.56 5.63 7.97 5.90 8.32 6.12 8.61 6.32 8.87 6.49 9.10
7 3.34 4.95 4.16 5.92 4.68 6.54 5.06 7.01 5.36 7.37 5.61 7.68 5.82 7.94 6.00 8.17 6.16 8.37
8 3.26 4.75 4.04 5.64 4.53 6.20 4.89 6.62 5.17 6.96 5.40 7.24 5.60 7.47 5.77 7.68 5.92 7.86
9 3.20 4.60 3.95 5.43 4.41 5.96 4.76 6.35 5.02 6.66 5.24 6.91 5.43 7.13 5.59 7.33 5.74 7.49
10 3.15 4.48 3.88 5.27 4.33 5.77 4.65 6.14 4.91 6.43 5.12 6.67 5.30 6.87 5.46 7.05 5.60 7.21
11 3.11 4.39 3.82 5.15 4.26 5.62 4.57 5.97 4.82 6.25 5.03 6.48 5.20 6.67 5.35 6.84 5.49 6.99
12 3.08 4.32 3.77 5.05 4.20 5.50 4.51 5.84 4.75 6.10 4.95 6.32 5.12 6.51 5.27 6.67 5.39 6.81
13 3.06 4.26 3.73 4.96 4.15 5.40 4.45 5.73 4.69 5.98 4.88 6.19 5.05 6.37 5.19 6.53 5.32 6.67
14 3.03 4.21 3.70 4.89 4.11 5.32 4.41 5.63 4.64 5.88 4.83 6.08 4.99 6.26 5.13 6.41 5.25 6.54
15 3.01 4.17 3.67 4.84 4.08 5.25 4.37 5.56 4.59 5.80 4.78 5.99 4.94 6.16 5.08 6.31 5.20 6.44
16 3.00 4.13 3.65 4.79 4.05 5.19 4.33 5.49 4.56 5.72 4.74 5.92 4.90 6.08 5.03 6.22 5.15 6.35
17 2.98 4.10 3.63 4.74 4.02 5.14 4.30 5.43 4.52 5.66 4.70 5.85 4.86 6.01 4.99 6.15 5.11 6.27
18 2.97 4.07 3.61 4.70 4.00 5.09 4.28 5.38 4.49 5.60 4.67 5.79 4.82 5.94 4.96 6.08 5.07 6.20
19 2.96 4.05 3.59 4.67 3.98 5.05 4.25 5.33 4.47 5.55 4.65 5.73 4.79 5.89 4.92 6.02 5.04 6.14
20 2.95 4.02 3.58 4.64 3.96 5.02 4.23 5.29 4.45 5.51 4.62 5.69 4.77 5.84 4.90 5.97 5.01 6.09
24 2.92 3.96 3.53 4.55 3.90 4.91 4.17 5.17 4.37 5.37 4.54 5.54 4.68 5.69 4.81 5.81 4.92 5.92
30 2.89 3.89 3.49 4.45 3.85 4.80 4.10 5.05 4.30 5.24 4.46 5.40 4.60 5.54 4.72 5.65 4.82 5.76
40 2.86 3.82 3.44 4.37 3.79 4.70 4.04 4.93 4.23 5.11 4.39 5.26 4.52 5.39 4.63 5.50 4.73 5.60
60 2.83 3.76 3.40 4.28 3.74 4.59 3.98 4.82 4.16 4.99 4.31 5.13 4.44 5.25 4.55 5.36 4.65 5.45
120 2.80 3.70 3.36 4.20 3.68 4.50 3.92 4.71 4.10 4.87 4.24 5.01 4.36 5.12 4.47 5.21 4.56 5.30
infinity 2.77 3.64 3.31 4.12 3.63 4.40 3.86 4.60 4.03 4.76 4.17 4.88 4.29 4.99 4.39 5.08 4.47 5.16
'''
res = [line.split() for line in qcrit.replace('infinity','9999').split('\n')]
c=np.array(res[2:-1]).astype(float)
#c[c==9999] = np.inf
ccols = np.arange(2,11)
crows = c[:,0]
cv005 = c[:, 1::2]
cv001 = c[:, 2::2]
from scipy import interpolate
def get_tukeyQcrit(k, df, alpha=0.05):
'''
return critical values for Tukey's HSD (Q)
Parameters
----------
k : int in {2, ..., 10}
number of tests
df : int
degrees of freedom of error term
alpha : {0.05, 0.01}
type 1 error, 1-confidence level
    note: only limited error checking of the allowed parameter ranges
'''
if alpha == 0.05:
intp = interpolate.interp1d(crows, cv005[:,k-2])
elif alpha == 0.01:
intp = interpolate.interp1d(crows, cv001[:,k-2])
else:
raise ValueError('only implemented for alpha equal to 0.01 and 0.05')
return intp(df)
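#--- Editor's note: illustrative usage sketch, not part of the original module.
#With the qcrit table above, the tabulated 5% critical value for k=3 groups and
#df=20 is 3.58; degrees of freedom between table rows are linearly interpolated:
#    >>> get_tukeyQcrit(3, 20)    # roughly 3.58
#    >>> get_tukeyQcrit(3, 22)    # interpolated, between 3.58 (df=20) and 3.53 (df=24)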
def get_tukeyQcrit2(k, df, alpha=0.05):
'''
return critical values for Tukey's HSD (Q)
Parameters
----------
k : int in {2, ..., 10}
number of tests
df : int
degrees of freedom of error term
alpha : {0.05, 0.01}
type 1 error, 1-confidence level
    note: only limited error checking of the allowed parameter ranges
'''
from statsmodels.stats.libqsturng import qsturng
return qsturng(1-alpha, k, df)
def Tukeythreegene(first,second,third):
#Performing the Tukey HSD post-hoc test for three genes
## qwb = xlrd.open_workbook('F:/Lab/bioinformatics/qcrittable.xls')
## #opening the workbook containing the q crit table
## qwb.sheet_names()
## qcrittable = qwb.sheet_by_name(u'Sheet1')
firstmean = numpy.mean(first) #means of the three arrays
secondmean = numpy.mean(second)
thirdmean = numpy.mean(third)
firststd = numpy.std(first) #standard deviations of the threearrays
secondstd = numpy.std(second)
thirdstd = numpy.std(third)
firsts2 = math.pow(firststd,2) #standard deviation squared of the three arrays
seconds2 = math.pow(secondstd,2)
thirds2 = math.pow(thirdstd,2)
mserrornum = firsts2*2+seconds2*2+thirds2*2 #numerator for mean square error
mserrorden = (len(first)+len(second)+len(third))-3 #denominator for mean square error
mserror = mserrornum/mserrorden #mean square error
standarderror = math.sqrt(mserror/len(first))
#standard error, which is square root of mserror and the number of samples in a group
dftotal = len(first)+len(second)+len(third)-1 #various degrees of freedom
dfgroups = 2
dferror = dftotal-dfgroups
    qcrit = 0.5  # arbitrary placeholder, overwritten below; was: qcrittable.cell(dftotal, 3).value
qcrit = get_tukeyQcrit(3, dftotal, alpha=0.05)
#getting the q critical value, for degrees of freedom total and 3 groups
qtest3to1 = (math.fabs(thirdmean-firstmean))/standarderror
#calculating q test statistic values
qtest3to2 = (math.fabs(thirdmean-secondmean))/standarderror
qtest2to1 = (math.fabs(secondmean-firstmean))/standarderror
conclusion = []
## print(qcrit
print(qtest3to1)
print(qtest3to2)
print(qtest2to1)
    #compare each q test statistic to the critical value; exceeding the critical
    #value rejects the null of equal means for that pair
    if(qtest3to1>qcrit):
        conclusion.append('3to1alt')
    else:
        conclusion.append('3to1null')
    if(qtest3to2>qcrit):
        conclusion.append('3to2alt')
    else:
        conclusion.append('3to2null')
    if(qtest2to1>qcrit):
        conclusion.append('2to1alt')
    else:
        conclusion.append('2to1null')
return conclusion
#rewrite by Vincent
def Tukeythreegene2(genes): #Performing the Tukey HSD post-hoc test for three genes
"""gend is a list, ie [first, second, third]"""
# qwb = xlrd.open_workbook('F:/Lab/bioinformatics/qcrittable.xls')
#opening the workbook containing the q crit table
# qwb.sheet_names()
# qcrittable = qwb.sheet_by_name(u'Sheet1')
means = []
stds = []
for gene in genes:
means.append(numpy.mean(gene))
        stds.append(numpy.std(gene))
#firstmean = numpy.mean(first) #means of the three arrays
#secondmean = numpy.mean(second)
#thirdmean = numpy.mean(third)
#firststd = numpy.std(first) #standard deviations of the three arrays
#secondstd = numpy.std(second)
#thirdstd = numpy.std(third)
stds2 = []
for std in stds:
stds2.append(math.pow(std,2))
#firsts2 = math.pow(firststd,2) #standard deviation squared of the three arrays
#seconds2 = math.pow(secondstd,2)
#thirds2 = math.pow(thirdstd,2)
#mserrornum = firsts2*2+seconds2*2+thirds2*2 #numerator for mean square error
mserrornum = sum(stds2)*2
mserrorden = (len(genes[0])+len(genes[1])+len(genes[2]))-3 #denominator for mean square error
mserror = mserrornum/mserrorden #mean square error
def catstack(args):
x = np.hstack(args)
labels = np.hstack([k*np.ones(len(arr)) for k,arr in enumerate(args)])
return x, labels
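#--- Editor's note: illustrative sketch, not part of the original module.
#catstack stacks a list of samples into one data vector plus integer group labels:
#    >>> x, labels = catstack([[1, 2], [3, 4, 5]])
#    >>> x         # array([1, 2, 3, 4, 5])
#    >>> labels    # array([ 0.,  0.,  1.,  1.,  1.])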
def maxzero(x):
'''find all up zero crossings and return the index of the highest
Not used anymore
>>> np.random.seed(12345)
>>> x = np.random.randn(8)
>>> x
array([-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057,
1.39340583, 0.09290788, 0.28174615])
>>> maxzero(x)
(4, array([1, 4]))
no up-zero-crossing at end
>>> np.random.seed(0)
>>> x = np.random.randn(8)
>>> x
array([ 1.76405235, 0.40015721, 0.97873798, 2.2408932 , 1.86755799,
-0.97727788, 0.95008842, -0.15135721])
>>> maxzero(x)
(None, array([6]))
'''
x = np.asarray(x)
cond1 = x[:-1] < 0
cond2 = x[1:] > 0
#allzeros = np.nonzero(np.sign(x[:-1])*np.sign(x[1:]) <= 0)[0] + 1
allzeros = np.nonzero((cond1 & cond2) | (x[1:]==0))[0] + 1
if x[-1] >=0:
maxz = max(allzeros)
else:
maxz = None
return maxz, allzeros
def maxzerodown(x):
    '''find all down zero crossings and return the index of the highest
Not used anymore
>>> np.random.seed(12345)
>>> x = np.random.randn(8)
>>> x
array([-0.20470766, 0.47894334, -0.51943872, -0.5557303 , 1.96578057,
1.39340583, 0.09290788, 0.28174615])
>>> maxzero(x)
(4, array([1, 4]))
no up-zero-crossing at end
>>> np.random.seed(0)
>>> x = np.random.randn(8)
>>> x
array([ 1.76405235, 0.40015721, 0.97873798, 2.2408932 , 1.86755799,
-0.97727788, 0.95008842, -0.15135721])
>>> maxzero(x)
(None, array([6]))
'''
x = np.asarray(x)
cond1 = x[:-1] > 0
cond2 = x[1:] < 0
#allzeros = np.nonzero(np.sign(x[:-1])*np.sign(x[1:]) <= 0)[0] + 1
allzeros = np.nonzero((cond1 & cond2) | (x[1:]==0))[0] + 1
if x[-1] <=0:
maxz = max(allzeros)
else:
maxz = None
return maxz, allzeros
def rejectionline(n, alpha=0.5):
'''reference line for rejection in multiple tests
Not used anymore
from: section 3.2, page 60
'''
t = np.arange(n)/float(n)
frej = t/( t * (1-alpha) + alpha)
return frej
#I don't remember what I changed or why 2 versions,
#this follows german diss ??? with rline
#this might be useful if the null hypothesis is not "all effects are zero"
#rename to _bak and working again on fdrcorrection0
def fdrcorrection_bak(pvals, alpha=0.05, method='indep'):
'''Reject False discovery rate correction for pvalues
Old version, to be deleted
missing: methods that estimate fraction of true hypotheses
'''
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = pvals[pvals_sortind]
pecdf = ecdf(pvals_sorted)
if method in ['i', 'indep', 'p', 'poscorr']:
rline = pvals_sorted / alpha
elif method in ['n', 'negcorr']:
cm = np.sum(1./np.arange(1, len(pvals)))
rline = pvals_sorted / alpha * cm
elif method in ['g', 'onegcorr']: #what's this ? german diss
rline = pvals_sorted / (pvals_sorted*(1-alpha) + alpha)
elif method in ['oth', 'o2negcorr']: # other invalid, cut-paste
cm = np.sum(np.arange(len(pvals)))
rline = pvals_sorted / alpha /cm
else:
raise ValueError('method not available')
reject = pecdf >= rline
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
else:
rejectmax = 0
reject[:rejectmax] = True
return reject[pvals_sortind.argsort()]
def mcfdr(nrepl=100, nobs=50, ntests=10, ntrue=6, mu=0.5, alpha=0.05, rho=0.):
'''MonteCarlo to test fdrcorrection
'''
nfalse = ntests - ntrue
locs = np.array([0.]*ntrue + [mu]*(ntests - ntrue))
results = []
for i in range(nrepl):
#rvs = locs + stats.norm.rvs(size=(nobs, ntests))
rvs = locs + randmvn(rho, size=(nobs, ntests))
tt, tpval = stats.ttest_1samp(rvs, 0)
res = fdrcorrection_bak(np.abs(tpval), alpha=alpha, method='i')
res0 = fdrcorrection0(np.abs(tpval), alpha=alpha)
#res and res0 give the same results
results.append([np.sum(res[:ntrue]), np.sum(res[ntrue:])] +
[np.sum(res0[:ntrue]), np.sum(res0[ntrue:])] +
res.tolist() +
np.sort(tpval).tolist() +
[np.sum(tpval[:ntrue]<alpha),
np.sum(tpval[ntrue:]<alpha)] +
[np.sum(tpval[:ntrue]<alpha/ntests),
np.sum(tpval[ntrue:]<alpha/ntests)])
return np.array(results)
def randmvn(rho, size=(1, 2), standardize=False):
'''create random draws from equi-correlated multivariate normal distribution
Parameters
----------
rho : float
correlation coefficient
size : tuple of int
size is interpreted (nobs, nvars) where each row
Returns
-------
rvs : ndarray, (nobs, nvars)
where each row is a independent random draw of nvars-dimensional correlated rvs
'''
nobs, nvars = size
if 0 < rho and rho < 1:
rvs = np.random.randn(nobs, nvars+1)
rvs2 = rvs[:,:-1] * np.sqrt((1-rho)) + rvs[:,-1:] * np.sqrt(rho)
elif rho ==0:
rvs2 = np.random.randn(nobs, nvars)
elif rho < 0:
if rho < -1./(nvars-1):
raise ValueError('rho has to be larger than -1./(nvars-1)')
elif rho == -1./(nvars-1):
rho = -1./(nvars-1+1e-10) #barely positive definite
#use Cholesky
A = rho*np.ones((nvars,nvars))+(1-rho)*np.eye(nvars)
rvs2 = np.dot(np.random.randn(nobs, nvars), np.linalg.cholesky(A).T)
if standardize:
rvs2 = stats.zscore(rvs2)
return rvs2
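#--- Editor's note: illustrative sketch, not part of the original module.
#Draw equi-correlated normals and check that the empirical correlation is close
#to rho for a large sample:
#    >>> rvs = randmvn(0.5, size=(10000, 3))
#    >>> np.corrcoef(rvs, rowvar=0)   # off-diagonal entries roughly 0.5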
#============================
#
# Part 2: Multiple comparisons and independent samples tests
#
#============================
def tiecorrect(xranks):
'''
should be equivalent of scipy.stats.tiecorrect
'''
#casting to int rounds down, but not relevant for this case
rankbincount = np.bincount(np.asarray(xranks,dtype=int))
nties = rankbincount[rankbincount > 1]
    ntot = float(len(xranks))
tiecorrection = 1 - (nties**3 - nties).sum()/(ntot**3 - ntot)
return tiecorrection
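#--- Editor's note: illustrative sketch, not part of the original module.
#For the ranks of [1, 2, 2, 3], i.e. [1, 2.5, 2.5, 4], there is one tie of size 2,
#so the correction is 1 - (2**3 - 2)/(4**3 - 4) = 0.9, matching scipy.stats.tiecorrect:
#    >>> tiecorrect([1, 2.5, 2.5, 4])
#    0.9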
class GroupsStats(object):
'''
statistics by groups (another version)
groupstats as a class with lazy evaluation (not yet - decorators are still
missing)
written this time as equivalent of scipy.stats.rankdata
gs = GroupsStats(X, useranks=True)
assert_almost_equal(gs.groupmeanfilter, stats.rankdata(X[:,0]), 15)
TODO: incomplete doc strings
'''
def __init__(self, x, useranks=False, uni=None, intlab=None):
'''descriptive statistics by groups
Parameters
----------
x : array, 2d
first column data, second column group labels
useranks : boolean
if true, then use ranks as data corresponding to the
scipy.stats.rankdata definition (start at 1, ties get mean)
uni, intlab : arrays (optional)
to avoid call to unique, these can be given as inputs
'''
self.x = np.asarray(x)
if intlab is None:
uni, intlab = np.unique(x[:,1], return_inverse=True)
elif uni is None:
uni = np.unique(x[:,1])
self.useranks = useranks
self.uni = uni
self.intlab = intlab
self.groupnobs = groupnobs = np.bincount(intlab)
#temporary until separated and made all lazy
self.runbasic(useranks=useranks)
def runbasic_old(self, useranks=False):
#check: refactoring screwed up case useranks=True
#groupxsum = np.bincount(intlab, weights=X[:,0])
#groupxmean = groupxsum * 1.0 / groupnobs
x = self.x
if useranks:
self.xx = x[:,1].argsort().argsort() + 1 #rankraw
else:
self.xx = x[:,0]
self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
#print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
# start at 1 for stats.rankdata :
self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
self.groupmeanfilter = grouprankmean[self.intlab]
#return grouprankmean[intlab]
def runbasic(self, useranks=False):
#check: refactoring screwed up case useranks=True
#groupxsum = np.bincount(intlab, weights=X[:,0])
#groupxmean = groupxsum * 1.0 / groupnobs
x = self.x
if useranks:
xuni, xintlab = np.unique(x[:,0], return_inverse=True)
ranksraw = x[:,0].argsort().argsort() + 1 #rankraw
self.xx = GroupsStats(np.column_stack([ranksraw, xintlab]),
useranks=False).groupmeanfilter
else:
self.xx = x[:,0]
self.groupsum = groupranksum = np.bincount(self.intlab, weights=self.xx)
#print('groupranksum', groupranksum, groupranksum.shape, self.groupnobs.shape
# start at 1 for stats.rankdata :
self.groupmean = grouprankmean = groupranksum * 1.0 / self.groupnobs # + 1
self.groupmeanfilter = grouprankmean[self.intlab]
#return grouprankmean[intlab]
def groupdemean(self):
return self.xx - self.groupmeanfilter
def groupsswithin(self):
xtmp = self.groupdemean()
return np.bincount(self.intlab, weights=xtmp**2)
def groupvarwithin(self):
return self.groupsswithin()/(self.groupnobs-1) #.sum()
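#--- Editor's note: illustrative sketch, not part of the original module.
#Two groups of two observations each; groupvarwithin returns the per-group
#sample variance:
#    >>> gs = GroupsStats(np.column_stack([[1., 2., 3., 4.], [0, 0, 1, 1]]))
#    >>> gs.groupmean            # array([ 1.5,  3.5])
#    >>> gs.groupvarwithin()     # array([ 0.5,  0.5])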
class TukeyHSDResults(object):
"""Results from Tukey HSD test, with additional plot methods
Can also compute and plot additional post-hoc evaluations using this
results class.
Attributes
----------
reject : array of boolean, True if we reject Null for group pair
meandiffs : pairwise mean differences
confint : confidence interval for pairwise mean differences
std_pairs : standard deviation of pairwise mean differences
q_crit : critical value of studentized range statistic at given alpha
halfwidths : half widths of simultaneous confidence interval
Notes
-----
halfwidths is only available after call to `plot_simultaneous`.
Other attributes contain information about the data from the
MultiComparison instance: data, df_total, groups, groupsunique, variance.
"""
def __init__(self, mc_object, results_table, q_crit, reject=None,
meandiffs=None, std_pairs=None, confint=None, df_total=None,
reject2=None, variance=None):
self._multicomp = mc_object
self._results_table = results_table
self.q_crit = q_crit
self.reject = reject
self.meandiffs = meandiffs
self.std_pairs = std_pairs
self.confint = confint
self.df_total = df_total
self.reject2 = reject2
self.variance = variance
# Taken out of _multicomp for ease of access for unknowledgeable users
self.data = self._multicomp.data
        self.groups = self._multicomp.groups
self.groupsunique = self._multicomp.groupsunique
def __str__(self):
return str(self._results_table)
def summary(self):
'''Summary table that can be printed
'''
return self._results_table
def _simultaneous_ci(self):
"""Compute simultaneous confidence intervals for comparison of means.
"""
self.halfwidths = simultaneous_ci(self.q_crit, self.variance,
self._multicomp.groupstats.groupnobs,
self._multicomp.pairindices)
def plot_simultaneous(self, comparison_name=None, ax=None, figsize=(10,6),
xlabel=None, ylabel=None):
"""Plot a universal confidence interval of each group mean
        Visualize significant differences in a plot with one confidence
interval per group instead of all pairwise confidence intervals.
Parameters
----------
comparison_name : string, optional
if provided, plot_intervals will color code all groups that are
significantly different from the comparison_name red, and will
color code insignificant groups gray. Otherwise, all intervals will
just be plotted in black.
ax : matplotlib axis, optional
An axis handle on which to attach the plot.
figsize : tuple, optional
tuple for the size of the figure generated
xlabel : string, optional
Name to be displayed on x axis
ylabel : string, optional
Name to be displayed on y axis
Returns
-------
fig : Matplotlib Figure object
handle to figure object containing interval plots
Notes
-----
Multiple comparison tests are nice, but lack a good way to be
visualized. If you have, say, 6 groups, showing a graph of the means
between each group will require 15 confidence intervals.
Instead, we can visualize inter-group differences with a single
interval for each group mean. Hochberg et al. [1] first proposed this
idea and used Tukey's Q critical value to compute the interval widths.
Unlike plotting the differences in the means and their respective
confidence intervals, any two pairs can be compared for significance
by looking for overlap.
References
----------
.. [1] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.
Examples
--------
>>> from statsmodels.examples.try_tukey_hsd import cylinders, cyl_labels
>>> from statsmodels.stats.multicomp import MultiComparison
>>> cardata = MultiComparison(cylinders, cyl_labels)
>>> results = cardata.tukeyhsd()
>>> results.plot_simultaneous()
<matplotlib.figure.Figure at 0x...>
This example shows an example plot comparing significant differences
in group means. Significant differences at the alpha=0.05 level can be
        identified by intervals that do not overlap (e.g. USA vs Japan,
USA vs Germany).
>>> results.plot_simultaneous(comparison_name="USA")
<matplotlib.figure.Figure at 0x...>
Optionally provide one of the group names to color code the plot to
highlight group means different from comparison_name.
"""
fig, ax1 = utils.create_mpl_ax(ax)
if figsize is not None:
fig.set_size_inches(figsize)
if getattr(self, 'halfwidths', None) is None:
self._simultaneous_ci()
means = self._multicomp.groupstats.groupmean
sigidx = []
nsigidx = []
minrange = [means[i] - self.halfwidths[i] for i in range(len(means))]
maxrange = [means[i] + self.halfwidths[i] for i in range(len(means))]
if comparison_name is None:
ax1.errorbar(means, lrange(len(means)), xerr=self.halfwidths,
marker='o', linestyle='None', color='k', ecolor='k')
else:
if comparison_name not in self.groupsunique:
raise ValueError('comparison_name not found in group names.')
midx = np.where(self.groupsunique==comparison_name)[0]
for i in range(len(means)):
if self.groupsunique[i] == comparison_name:
continue
if (min(maxrange[i], maxrange[midx]) -
max(minrange[i], minrange[midx]) < 0):
sigidx.append(i)
else:
nsigidx.append(i)
#Plot the master comparison
ax1.errorbar(means[midx], midx, xerr=self.halfwidths[midx],
marker='o', linestyle='None', color='b', ecolor='b')
ax1.plot([minrange[midx]]*2, [-1, self._multicomp.ngroups],
linestyle='--', color='0.7')
ax1.plot([maxrange[midx]]*2, [-1, self._multicomp.ngroups],
linestyle='--', color='0.7')
#Plot those that are significantly different
if len(sigidx) > 0:
ax1.errorbar(means[sigidx], sigidx,
xerr=self.halfwidths[sigidx], marker='o',
linestyle='None', color='r', ecolor='r')
#Plot those that are not significantly different
if len(nsigidx) > 0:
ax1.errorbar(means[nsigidx], nsigidx,
xerr=self.halfwidths[nsigidx], marker='o',
linestyle='None', color='0.5', ecolor='0.5')
ax1.set_title('Multiple Comparisons Between All Pairs (Tukey)')
r = np.max(maxrange) - np.min(minrange)
ax1.set_ylim([-1, self._multicomp.ngroups])
ax1.set_xlim([np.min(minrange) - r / 10., np.max(maxrange) + r / 10.])
ax1.set_yticklabels(np.insert(self.groupsunique.astype(str), 0, ''))
ax1.set_yticks(np.arange(-1, len(means)+1))
ax1.set_xlabel(xlabel if xlabel is not None else '')
ax1.set_ylabel(ylabel if ylabel is not None else '')
return fig
class MultiComparison(object):
'''Tests for multiple comparisons
Parameters
----------
data : array
independent data samples
groups : array
group labels corresponding to each data point
group_order : list of strings, optional
the desired order for the group mean results to be reported in. If
not specified, results are reported in increasing order.
If group_order does not contain all labels that are in groups, then
only those observations are kept that have a label in group_order.
'''
def __init__(self, data, groups, group_order=None):
if len(data) != len(groups):
raise ValueError('data has %d elements and groups has %d' % (len(data), len(groups)))
self.data = np.asarray(data)
self.groups = groups = np.asarray(groups)
# Allow for user-provided sorting of groups
if group_order is None:
self.groupsunique, self.groupintlab = np.unique(groups,
return_inverse=True)
else:
#check if group_order has any names not in groups
for grp in group_order:
if grp not in groups:
raise ValueError(
"group_order value '%s' not found in groups"%grp)
self.groupsunique = np.array(group_order)
self.groupintlab = np.empty(len(data), int)
self.groupintlab.fill(-999) # instead of a nan
count = 0
for name in self.groupsunique:
idx = np.where(self.groups == name)[0]
count += len(idx)
self.groupintlab[idx] = np.where(self.groupsunique == name)[0]
if count != data.shape[0]:
#raise ValueError('group_order does not contain all groups')
# warn and keep only observations with label in group_order
import warnings
warnings.warn('group_order does not contain all groups:' +
' dropping observations')
mask_keep = self.groupintlab != -999
self.groupintlab = self.groupintlab[mask_keep]
self.data = self.data[mask_keep]
self.groups = self.groups[mask_keep]
if len(self.groupsunique) < 2:
raise ValueError('2 or more groups required for multiple comparisons')
self.datali = [data[self.groups == k] for k in self.groupsunique]
self.pairindices = np.triu_indices(len(self.groupsunique), 1) #tuple
self.nobs = self.data.shape[0]
self.ngroups = len(self.groupsunique)
def getranks(self):
'''convert data to rankdata and attach
This creates rankdata as it is used for non-parametric tests, where
in the case of ties the average rank is assigned.
'''
#bug: the next should use self.groupintlab instead of self.groups
#update: looks fixed
#self.ranks = GroupsStats(np.column_stack([self.data, self.groups]),
self.ranks = GroupsStats(np.column_stack([self.data, self.groupintlab]),
useranks=True)
self.rankdata = self.ranks.groupmeanfilter
def kruskal(self, pairs=None, multimethod='T'):
'''
pairwise comparison for kruskal-wallis test
This is just a reimplementation of scipy.stats.kruskal and does
not yet use a multiple comparison correction.
'''
self.getranks()
tot = self.nobs
meanranks = self.ranks.groupmean
groupnobs = self.ranks.groupnobs
# simultaneous/separate treatment of multiple tests
f=(tot * (tot + 1.) / 12.) / stats.tiecorrect(self.rankdata) #(xranks)
print('MultiComparison.kruskal')
for i,j in zip(*self.pairindices):
#pdiff = np.abs(mrs[i] - mrs[j])
pdiff = np.abs(meanranks[i] - meanranks[j])
se = np.sqrt(f * np.sum(1. / groupnobs[[i,j]] )) #np.array([8,8]))) #Fixme groupnobs[[i,j]] ))
Q = pdiff / se
            # TODO: clean up or remove these debugging print statements
print(i,j, pdiff, se, pdiff / se, pdiff / se > 2.6310)
print(stats.norm.sf(Q) * 2)
return stats.norm.sf(Q) * 2
def allpairtest(self, testfunc, alpha=0.05, method='bonf', pvalidx=1):
'''run a pairwise test on all pairs with multiple test correction
The statistical test given in testfunc is calculated for all pairs
and the p-values are adjusted by methods in multipletests. The p-value
correction is generic and based only on the p-values, and does not
take any special structure of the hypotheses into account.
Parameters
----------
testfunc : function
A test function for two (independent) samples. It is assumed that
the return value on position pvalidx is the p-value.
alpha : float
familywise error rate
method : string
This specifies the method for the p-value correction. Any method
of multipletests is possible.
pvalidx : int (default: 1)
position of the p-value in the return of testfunc
Returns
-------
sumtab : SimpleTable instance
summary table for printing
errors: TODO: check if this is still wrong, I think it's fixed.
results from multipletests are in different order
pval_corrected can be larger than 1 ???
'''
res = []
for i,j in zip(*self.pairindices):
res.append(testfunc(self.datali[i], self.datali[j]))
res = np.array(res)
reject, pvals_corrected, alphacSidak, alphacBonf = \
            multipletests(res[:, pvalidx], alpha=alpha, method=method)
#print(np.column_stack([res[:,0],res[:,1], reject, pvals_corrected])
i1, i2 = self.pairindices
if pvals_corrected is None:
resarr = np.array(lzip(self.groupsunique[i1], self.groupsunique[i2],
np.round(res[:,0],4),
np.round(res[:,1],4),
reject),
dtype=[('group1', object),
('group2', object),
('stat',float),
('pval',float),
('reject', np.bool8)])
else:
resarr = np.array(lzip(self.groupsunique[i1], self.groupsunique[i2],
np.round(res[:,0],4),
np.round(res[:,1],4),
np.round(pvals_corrected,4),
reject),
dtype=[('group1', object),
('group2', object),
('stat',float),
('pval',float),
('pval_corr',float),
('reject', np.bool8)])
from statsmodels.iolib.table import SimpleTable
results_table = SimpleTable(resarr, headers=resarr.dtype.names)
results_table.title = (
'Test Multiple Comparison %s \n%s%4.2f method=%s'
% (testfunc.__name__, 'FWER=', alpha, method) +
'\nalphacSidak=%4.2f, alphacBonf=%5.3f'
% (alphacSidak, alphacBonf))
return results_table, (res, reject, pvals_corrected,
alphacSidak, alphacBonf), resarr
def tukeyhsd(self, alpha=0.05):
"""Tukey's range test to compare means of all pairs of groups
Parameters
----------
alpha : float, optional
Value of FWER at which to calculate HSD.
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
"""
self.groupstats = GroupsStats(
np.column_stack([self.data, self.groupintlab]),
useranks=False)
gmeans = self.groupstats.groupmean
gnobs = self.groupstats.groupnobs #var_ = self.groupstats.groupvarwithin() #possibly an error in varcorrection in this case
var_ = np.var(self.groupstats.groupdemean(), ddof=len(gmeans))
#res contains: 0:(idx1, idx2), 1:reject, 2:meandiffs, 3: std_pairs, 4:confint, 5:q_crit,
#6:df_total, 7:reject2
res = tukeyhsd(gmeans, gnobs, var_, df=None, alpha=alpha, q_crit=None)
resarr = np.array(lzip(self.groupsunique[res[0][0]], self.groupsunique[res[0][1]],
np.round(res[2],4),
np.round(res[4][:, 0],4),
np.round(res[4][:, 1],4),
res[1]),
dtype=[('group1', object),
('group2', object),
('meandiff',float),
('lower',float),
('upper',float),
('reject', np.bool8)])
results_table = SimpleTable(resarr, headers=resarr.dtype.names)
        results_table.title = 'Multiple Comparison of Means - Tukey HSD, ' + \
                              'FWER=%4.2f' % alpha
return TukeyHSDResults(self, results_table, res[5], res[1], res[2],
res[3], res[4], res[6], res[7], var_)
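#--- Editor's note: illustrative usage sketch, not part of the original module;
#the data and group labels below are made up:
#    >>> data = np.array([9.1, 9.4, 8.9, 10.2, 10.8, 10.5, 9.9, 10.1, 9.8])
#    >>> groups = np.array(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'])
#    >>> mc = MultiComparison(data, groups)
#    >>> print(mc.tukeyhsd(alpha=0.05))                 # pairwise Tukey HSD table
#    >>> table, _, _ = mc.allpairtest(stats.ttest_ind, method='hs')
#    >>> print(table)                                   # Holm-Sidak corrected t-tests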
def rankdata(x):
'''rankdata, equivalent to scipy.stats.rankdata
just a different implementation, I have not yet compared speed
'''
uni, intlab = np.unique(x[:,0], return_inverse=True)
groupnobs = np.bincount(intlab)
    groupxsum = np.bincount(intlab, weights=x[:,0])
groupxmean = groupxsum * 1.0 / groupnobs
rankraw = x[:,0].argsort().argsort()
groupranksum = np.bincount(intlab, weights=rankraw)
# start at 1 for stats.rankdata :
grouprankmean = groupranksum * 1.0 / groupnobs + 1
return grouprankmean[intlab]
#new
def compare_ordered(vals, alpha):
'''simple ordered sequential comparison of means
vals : array_like
means or rankmeans for independent groups
incomplete, no return, not used yet
'''
vals = np.asarray(vals)
alphaf = alpha # Notation ?
sortind = np.argsort(vals)
pvals = vals[sortind]
sortrevind = sortind.argsort()
ntests = len(vals)
#alphacSidak = 1 - np.power((1. - alphaf), 1./ntests)
#alphacBonf = alphaf / float(ntests)
v1, v2 = np.triu_indices(ntests, 1)
#v1,v2 have wrong sequence
for i in range(4):
for j in range(4,i, -1):
print(i,j)
def varcorrection_unbalanced(nobs_all, srange=False):
'''correction factor for variance with unequal sample sizes
this is just a harmonic mean
Parameters
----------
nobs_all : array_like
The number of observations for each sample
srange : bool
if true, then the correction is divided by the number of samples
for the variance of the studentized range statistic
Returns
-------
correction : float
Correction factor for variance.
Notes
-----
variance correction factor is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
    This needs to be multiplied by the joint variance estimate, the mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
'''
nobs_all = np.asarray(nobs_all)
if not srange:
return (1./nobs_all).sum()
else:
return (1./nobs_all).sum()/len(nobs_all)
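#--- Editor's note: illustrative sketch, not part of the original module.
#    >>> varcorrection_unbalanced([4, 8])               # 1/4 + 1/8 = 0.375
#    >>> varcorrection_unbalanced([4, 8], srange=True)  # 0.375 / 2 = 0.1875
#With equal group sizes the srange version reduces to 1/n, e.g. [8, 8] gives 0.125.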
def varcorrection_pairs_unbalanced(nobs_all, srange=False):
'''correction factor for variance with unequal sample sizes for all pairs
this is just a harmonic mean
Parameters
----------
nobs_all : array_like
The number of observations for each sample
srange : bool
if true, then the correction is divided by 2 for the variance of
the studentized range statistic
Returns
-------
correction : array
Correction factor for variance.
Notes
-----
variance correction factor is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
    This needs to be multiplied by the joint variance estimate, the mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
For the studentized range statistic, the resulting factor has to be
divided by 2.
'''
#TODO: test and replace with broadcasting
n1, n2 = np.meshgrid(nobs_all, nobs_all)
if not srange:
return (1./n1 + 1./n2)
else:
return (1./n1 + 1./n2) / 2.
def varcorrection_unequal(var_all, nobs_all, df_all):
'''return joint variance from samples with unequal variances and unequal
sample sizes
something is wrong
Parameters
----------
var_all : array_like
The variance for each sample
nobs_all : array_like
The number of observations for each sample
df_all : array_like
degrees of freedom for each sample
Returns
-------
varjoint : float
joint variance.
dfjoint : float
        joint Satterthwaite degrees of freedom
Notes
-----
(copy, paste not correct)
variance is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1/n.
    This needs to be multiplied by the joint variance estimate, the mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
This is for variance of mean difference not of studentized range.
'''
var_all = np.asarray(var_all)
var_over_n = var_all *1./ nobs_all #avoid integer division
varjoint = var_over_n.sum()
dfjoint = varjoint**2 / (var_over_n**2 * df_all).sum()
return varjoint, dfjoint
def varcorrection_pairs_unequal(var_all, nobs_all, df_all):
'''return joint variance from samples with unequal variances and unequal
sample sizes for all pairs
something is wrong
Parameters
----------
var_all : array_like
The variance for each sample
nobs_all : array_like
The number of observations for each sample
df_all : array_like
degrees of freedom for each sample
Returns
-------
varjoint : array
joint variance.
dfjoint : array
        joint Satterthwaite degrees of freedom
Notes
-----
(copy, paste not correct)
variance is
1/k * sum_i 1/n_i
where k is the number of samples and summation is over i=0,...,k-1.
If all n_i are the same, then the correction factor is 1.
    This needs to be multiplied by the joint variance estimate, the mean square
error, MSE. To obtain the correction factor for the standard deviation,
square root needs to be taken.
TODO: something looks wrong with dfjoint, is formula from SPSS
'''
#TODO: test and replace with broadcasting
v1, v2 = np.meshgrid(var_all, var_all)
n1, n2 = np.meshgrid(nobs_all, nobs_all)
df1, df2 = np.meshgrid(df_all, df_all)
varjoint = v1/n1 + v2/n2
dfjoint = varjoint**2 / (df1 * (v1/n1)**2 + df2 * (v2/n2)**2)
return varjoint, dfjoint
def tukeyhsd(mean_all, nobs_all, var_all, df=None, alpha=0.05, q_crit=None):
'''simultaneous Tukey HSD
check: instead of sorting, I use absolute value of pairwise differences
in means. That's irrelevant for the test, but maybe reporting actual
differences would be better.
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n
'''
mean_all = np.asarray(mean_all)
#check if or when other ones need to be arrays
n_means = len(mean_all)
if df is None:
df = nobs_all - 1
if np.size(df) == 1: # assumes balanced samples with df = n - 1, n_i = n
df_total = n_means * df
df = np.ones(n_means) * df
else:
df_total = np.sum(df)
if (np.size(nobs_all) == 1) and (np.size(var_all) == 1):
#balanced sample sizes and homogenous variance
var_pairs = 1. * var_all / nobs_all * np.ones((n_means, n_means))
elif np.size(var_all) == 1:
#unequal sample sizes and homogenous variance
var_pairs = var_all * varcorrection_pairs_unbalanced(nobs_all,
srange=True)
elif np.size(var_all) > 1:
var_pairs, df_sum = varcorrection_pairs_unequal(nobs_all, var_all, df)
var_pairs /= 2.
#check division by two for studentized range
else:
raise ValueError('not supposed to be here')
#meandiffs_ = mean_all[:,None] - mean_all
meandiffs_ = mean_all - mean_all[:,None] #reverse sign, check with R example
std_pairs_ = np.sqrt(var_pairs)
#select all pairs from upper triangle of matrix
idx1, idx2 = np.triu_indices(n_means, 1)
meandiffs = meandiffs_[idx1, idx2]
std_pairs = std_pairs_[idx1, idx2]
st_range = np.abs(meandiffs) / std_pairs #studentized range statistic
df_total_ = max(df_total, 5) #TODO: smallest df in table
if q_crit is None:
q_crit = get_tukeyQcrit2(n_means, df_total, alpha=alpha)
reject = st_range > q_crit
crit_int = std_pairs * q_crit
reject2 = np.abs(meandiffs) > crit_int
confint = np.column_stack((meandiffs - crit_int, meandiffs + crit_int))
return (idx1, idx2), reject, meandiffs, std_pairs, confint, q_crit, \
df_total, reject2
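#--- Editor's note: illustrative usage sketch, not part of the original module;
#the group means, common sample size and pooled variance below are made up:
#    >>> (i1, i2), reject, meandiffs, std_pairs, confint, q_crit, df_t, reject2 = \
#    ...     tukeyhsd([7.71, 7.76, 7.80], nobs_all=8, var_all=0.001)
#    >>> lzip(i1, i2, np.round(meandiffs, 4), reject)   # one entry per pair of groups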
def simultaneous_ci(q_crit, var, groupnobs, pairindices=None):
"""Compute simultaneous confidence intervals for comparison of means.
q_crit value is generated from tukey hsd test. Variance is considered
across all groups. Returned halfwidths can be thought of as uncertainty
intervals around each group mean. They allow for simultaneous
comparison of pairwise significance among any pairs (by checking for
overlap)
Parameters
----------
q_crit : float
The Q critical value studentized range statistic from Tukey's HSD
var : float
The group variance
groupnobs : array-like object
Number of observations contained in each group.
pairindices : tuple of lists, optional
Indices corresponding to the upper triangle of matrix. Computed
here if not supplied
Returns
-------
halfwidths : ndarray
Half the width of each confidence interval for each group given in
groupnobs
See Also
--------
MultiComparison : statistics class providing significance tests
tukeyhsd : among other things, computes q_crit value
References
----------
.. [1] Hochberg, Y., and A. C. Tamhane. Multiple Comparison Procedures.
Hoboken, NJ: John Wiley & Sons, 1987.)
"""
# Set initial variables
ng = len(groupnobs)
if pairindices is None:
pairindices = np.triu_indices(ng, 1)
# Compute dij for all pairwise comparisons ala hochberg p. 95
gvar = var / groupnobs
d12 = np.sqrt(gvar[pairindices[0]] + gvar[pairindices[1]])
# Create the full d matrix given all known dij vals
d = np.zeros((ng, ng))
d[pairindices] = d12
d = d + d.conj().T
# Compute the two global sums from hochberg eq 3.32
sum1 = np.sum(d12)
sum2 = np.sum(d, axis=0)
if (ng > 2):
w = ((ng-1.) * sum2 - sum1) / ((ng - 1.) * (ng - 2.))
else:
        w = sum1 * np.ones(2) / 2.  # ng == 2: both groups share the single pairwise d
return (q_crit / np.sqrt(2))*w
def distance_st_range(mean_all, nobs_all, var_all, df=None, triu=False):
'''pairwise distance matrix, outsourced from tukeyhsd
CHANGED: meandiffs are with sign, studentized range uses abs
q_crit added for testing
TODO: error in variance calculation when nobs_all is scalar, missing 1/n
'''
mean_all = np.asarray(mean_all)
#check if or when other ones need to be arrays
n_means = len(mean_all)
if df is None:
df = nobs_all - 1
if np.size(df) == 1: # assumes balanced samples with df = n - 1, n_i = n
df_total = n_means * df
else:
df_total = np.sum(df)
if (np.size(nobs_all) == 1) and (np.size(var_all) == 1):
#balanced sample sizes and homogenous variance
var_pairs = 1. * var_all / nobs_all * np.ones((n_means, n_means))
elif np.size(var_all) == 1:
#unequal sample sizes and homogenous variance
var_pairs = var_all * varcorrection_pairs_unbalanced(nobs_all,
srange=True)
elif np.size(var_all) > 1:
var_pairs, df_sum = varcorrection_pairs_unequal(nobs_all, var_all, df)
var_pairs /= 2.
#check division by two for studentized range
else:
raise ValueError('not supposed to be here')
#meandiffs_ = mean_all[:,None] - mean_all
meandiffs = mean_all - mean_all[:,None] #reverse sign, check with R example
std_pairs = np.sqrt(var_pairs)
idx1, idx2 = np.triu_indices(n_means, 1)
if triu:
#select all pairs from upper triangle of matrix
        meandiffs = meandiffs[idx1, idx2]
        std_pairs = std_pairs[idx1, idx2]
st_range = np.abs(meandiffs) / std_pairs #studentized range statistic
return st_range, meandiffs, std_pairs, (idx1,idx2) #return square arrays
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr)
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against mean comparisons
'''
return np.eye(nm) - np.ones((nm,nm))/nm
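#--- Editor's note: illustrative sketch, not part of the original module, for nm=3:
#    >>> contrast_allpairs(3)    # rows correspond to the pairs (0,1), (0,2), (1,2)
#    array([[ 1., -1.,  0.],
#           [ 1.,  0., -1.],
#           [ 0.,  1., -1.]])
#    >>> contrast_all_one(3)     # two rows: first vs second, first vs third
#    >>> contrast_diff_mean(3)   # each mean against the grand mean, rows sum to zero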
def tukey_pvalues(std_range, nm, df):
#corrected but very slow with warnings about integration
from statsmodels.sandbox.distributions.multivariate import mvstdtprob
#nm = len(std_range)
contr = contrast_allpairs(nm)
corr = np.dot(contr, contr.T)/2.
tstat = std_range / np.sqrt(2) * np.ones(corr.shape[0]) #need len of all pairs
return multicontrast_pvalues(tstat, corr, df=df)
def test_tukey_pvalues():
#testcase with 3 is not good because all pairs has also 3*(3-1)/2=3 elements
res = tukey_pvalues(3.649, 3, 16) #3.649*np.ones(3), 16)
assert_almost_equal(0.05, res[0], 3)
assert_almost_equal(0.05*np.ones(3), res[1], 3)
def multicontrast_pvalues(tstat, tcorr, df=None, dist='t', alternative='two-sided'):
'''pvalues for simultaneous tests
'''
from statsmodels.sandbox.distributions.multivariate import mvstdtprob
if (df is None) and (dist == 't'):
raise ValueError('df has to be specified for the t-distribution')
tstat = np.asarray(tstat)
ntests = len(tstat)
cc = np.abs(tstat)
pval_global = 1 - mvstdtprob(-cc,cc, tcorr, df)
pvals = []
for ti in cc:
limits = ti*np.ones(ntests)
        pvals.append(1 - mvstdtprob(-limits, limits, tcorr, df))
return pval_global, np.asarray(pvals)
class StepDown(object):
'''a class for step down methods
This is currently for simple tree subset descend, similar to homogeneous_subsets,
but checks all leave-one-out subsets instead of assuming an ordered set.
Comment in SAS manual:
SAS only uses interval subsets of the sorted list, which is sufficient for range
tests (maybe also equal variance and balanced sample sizes are required).
For F-test based critical distances, the restriction to intervals is not sufficient.
This version uses a single critical value of the studentized range distribution
for all comparisons, and is therefore a step-down version of Tukey HSD.
The class is written so it can be subclassed, where the get_distance_matrix and
get_crit are overwritten to obtain other step-down procedures such as REGW.
iter_subsets can be overwritten, to get a recursion as in the many to one comparison
with a control such as in Dunnet's test.
A one-sided right tail test is not covered because the direction of the inequality
is hard coded in check_set. Also Peritz's check of partitions is not possible, but
I have not seen it mentioned in any more recent references.
I have only partially read the step-down procedure for closed tests by Westfall.
One change to make it more flexible, is to separate out the decision on a subset,
also because the F-based tests, FREGW in SPSS, take information from all elements of
a set and not just pairwise comparisons. I haven't looked at the details of
    the F-based tests such as Scheffé yet. It looks like running an F-test on equality
of means in each subset. This would also outsource how pairwise conditions are
combined, any larger or max. This would also imply that the distance matrix cannot
be calculated in advance for tests like the F-based ones.
'''
def __init__(self, vals, nobs_all, var_all, df=None):
self.vals = vals
self.n_vals = len(vals)
self.nobs_all = nobs_all
self.var_all = var_all
self.df = df
# the following has been moved to run
#self.cache_result = {}
#self.crit = self.getcrit(0.5) #decide where to set alpha, moved to run
#self.accepted = [] #store accepted sets, not unique
def get_crit(self, alpha):
#currently tukey Q, add others
q_crit = get_tukeyQcrit(self.n_vals, self.df, alpha=alpha)
return q_crit * np.ones(self.n_vals)
def get_distance_matrix(self):
'''studentized range statistic'''
#make into property, decorate
dres = distance_st_range(self.vals, self.nobs_all, self.var_all, df=self.df)
self.distance_matrix = dres[0]
def iter_subsets(self, indices):
for ii in range(len(indices)):
idxsub = copy.copy(indices)
idxsub.pop(ii)
yield idxsub
def check_set(self, indices):
'''check whether pairwise distances of indices satisfy condition
'''
indtup = tuple(indices)
if indtup in self.cache_result:
return self.cache_result[indtup]
else:
set_distance_matrix = self.distance_matrix[np.asarray(indices)[:,None], indices]
n_elements = len(indices)
if np.any(set_distance_matrix > self.crit[n_elements-1]):
res = True
else:
res = False
self.cache_result[indtup] = res
return res
def stepdown(self, indices):
print(indices)
if self.check_set(indices): # larger than critical distance
if (len(indices) > 2): # step down into subsets if more than 2 elements
for subs in self.iter_subsets(indices):
self.stepdown(subs)
else:
self.rejected.append(tuple(indices))
else:
self.accepted.append(tuple(indices))
return indices
def run(self, alpha):
'''main function to run the test,
could be done in __call__ instead
this could have all the initialization code
'''
self.cache_result = {}
self.crit = self.get_crit(alpha) #decide where to set alpha, moved to run
self.accepted = [] #store accepted sets, not unique
self.rejected = []
self.get_distance_matrix()
self.stepdown(lrange(self.n_vals))
        return list(set(self.accepted)), list(set(self.rejected))
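#--- Editor's note: illustrative usage sketch, not part of the original module;
#group means, sample sizes and pooled variance are borrowed from the example
#in the __main__ block further below:
#    >>> gmeans = np.array([7.71375, 7.76125, 7.78428571, 7.79875])
#    >>> gnobs = np.array([8, 8, 7, 8])
#    >>> sd = StepDown(gmeans, gnobs, 0.001, df=27)
#    >>> accepted, rejected = sd.run(alpha=0.05)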
def homogeneous_subsets(vals, dcrit):
'''recursively check all pairs of vals for minimum distance
step down method as in Newman-Keuls and Ryan procedures. This is not a
closed procedure since not all partitions are checked.
Parameters
----------
vals : array_like
values that are pairwise compared
dcrit : array_like or float
critical distance for rejecting, either float, or 2-dimensional array
with distances on the upper triangle.
Returns
-------
rejs : list of pairs
list of pair-indices with (strictly) larger than critical difference
nrejs : list of pairs
list of pair-indices with smaller than critical difference
lli : list of tuples
list of subsets with smaller than critical difference
res : tree
result of all comparisons (for checking)
this follows description in SPSS notes on Post-Hoc Tests
Because of the recursive structure, some comparisons are made several
times, but only unique pairs or sets are returned.
Examples
--------
>>> m = [0, 2, 2.5, 3, 6, 8, 9, 9.5,10 ]
>>> rej, nrej, ssli, res = homogeneous_subsets(m, 2)
>>> set_partition(ssli)
([(5, 6, 7, 8), (1, 2, 3), (4,)], [0])
>>> [np.array(m)[list(pp)] for pp in set_partition(ssli)[0]]
[array([ 8. , 9. , 9.5, 10. ]), array([ 2. , 2.5, 3. ]), array([ 6.])]
'''
nvals = len(vals)
indices_ = lrange(nvals)
rejected = []
subsetsli = []
if np.size(dcrit) == 1:
dcrit = dcrit*np.ones((nvals, nvals)) #example numbers for experimenting
def subsets(vals, indices_):
'''recursive function for constructing homogeneous subset
registers rejected and subsetli in outer scope
'''
i, j = (indices_[0], indices_[-1])
if vals[-1] - vals[0] > dcrit[i,j]:
rejected.append((indices_[0], indices_[-1]))
return [subsets(vals[:-1], indices_[:-1]),
subsets(vals[1:], indices_[1:]),
(indices_[0], indices_[-1])]
else:
subsetsli.append(tuple(indices_))
return indices_
res = subsets(vals, indices_)
all_pairs = [(i,j) for i in range(nvals) for j in range(nvals-1,i,-1)]
rejs = set(rejected)
not_rejected = list(set(all_pairs) - rejs)
return list(rejs), not_rejected, list(set(subsetsli)), res
def set_partition(ssli):
'''extract a partition from a list of tuples
    this would more accurately be called "select largest disjoint sets".
    Begun and Gabriel 1981 don't seem to be bothered by sets of accepted
    hypotheses with joint elements,
e.g. maximal_accepted_sets = { {1,2,3}, {2,3,4} }
This creates a set partition from a list of sets given as tuples.
It tries to find the partition with the largest sets. That is, sets are
included after being sorted by length.
If the list doesn't include the singletons, then it will be only a
partial partition. Missing items are singletons (I think).
Examples
--------
>>> li
[(5, 6, 7, 8), (1, 2, 3), (4, 5), (0, 1)]
>>> set_partition(li)
([(5, 6, 7, 8), (1, 2, 3)], [0, 4])
'''
part = []
for s in sorted(list(set(ssli)), key=len)[::-1]:
#print(s,
s_ = set(s).copy()
if not any(set(s_).intersection(set(t)) for t in part):
#print('inside:', s
part.append(s)
#else: print(part
missing = list(set(i for ll in ssli for i in ll)
- set(i for ll in part for i in ll))
return part, missing
def set_remove_subs(ssli):
'''remove sets that are subsets of another set from a list of tuples
Parameters
----------
ssli : list of tuples
each tuple is considered as a set
Returns
-------
part : list of tuples
new list with subset tuples removed, it is sorted by set-length of tuples. The
list contains original tuples, duplicate elements are not removed.
Examples
--------
>>> set_remove_subs([(0, 1), (1, 2), (1, 2, 3), (0,)])
[(1, 2, 3), (0, 1)]
>>> set_remove_subs([(0, 1), (1, 2), (1,1, 1, 2, 3), (0,)])
[(1, 1, 1, 2, 3), (0, 1)]
'''
#TODO: maybe convert all tuples to sets immediately, but I don't need the extra efficiency
part = []
for s in sorted(list(set(ssli)), key=lambda x: len(set(x)))[::-1]:
#print(s,
#s_ = set(s).copy()
if not any(set(s).issubset(set(t)) for t in part):
#print('inside:', s
part.append(s)
#else: print(part
## missing = list(set(i for ll in ssli for i in ll)
## - set(i for ll in part for i in ll))
return part
if __name__ == '__main__':
examples = ['tukey', 'tukeycrit', 'fdr', 'fdrmc', 'bonf', 'randmvn',
'multicompdev', 'None']#[-1]
if 'tukey' in examples:
#Example Tukey
x = np.array([[0,0,1]]).T + np.random.randn(3, 20)
print(Tukeythreegene(*x))
#Example FDR
#------------
if ('fdr' in examples) or ('bonf' in examples):
x1 = [1,1,1,0,-1,-1,-1,0,1,1,-1,1]
print(lzip(np.arange(len(x1)), x1))
print(maxzero(x1))
#[(0, 1), (1, 1), (2, 1), (3, 0), (4, -1), (5, -1), (6, -1), (7, 0), (8, 1), (9, 1), (10, -1), (11, 1)]
#(11, array([ 3, 7, 11]))
print(maxzerodown(-np.array(x1)))
locs = np.linspace(0,1,10)
locs = np.array([0.]*6 + [0.75]*4)
rvs = locs + stats.norm.rvs(size=(20,10))
tt, tpval = stats.ttest_1samp(rvs, 0)
tpval_sortind = np.argsort(tpval)
tpval_sorted = tpval[tpval_sortind]
reject = tpval_sorted < ecdf(tpval_sorted)*0.05
reject2 = max(np.nonzero(reject))
print(reject)
res = np.array(lzip(np.round(rvs.mean(0),4),np.round(tpval,4),
reject[tpval_sortind.argsort()]),
dtype=[('mean',float),
('pval',float),
('reject', np.bool8)])
#from statsmodels.iolib import SimpleTable
print(SimpleTable(res, headers=res.dtype.names))
print(fdrcorrection_bak(tpval, alpha=0.05))
print(reject)
print('\nrandom example')
print('bonf', multipletests(tpval, alpha=0.05, method='bonf'))
print('sidak', multipletests(tpval, alpha=0.05, method='sidak'))
print('hs', multipletests(tpval, alpha=0.05, method='hs'))
print('sh', multipletests(tpval, alpha=0.05, method='sh'))
pvals = np.array('0.0020 0.0045 0.0060 0.0080 0.0085 0.0090 0.0175 0.0250 '
'0.1055 0.5350'.split(), float)
        print('\nexample from lecture notes')
for meth in ['bonf', 'sidak', 'hs', 'sh']:
print(meth)
print(multipletests(pvals, alpha=0.05, method=meth))
if 'fdrmc' in examples:
mcres = mcfdr(nobs=100, nrepl=1000, ntests=30, ntrue=30, mu=0.1, alpha=0.05, rho=0.3)
mcmeans = np.array(mcres).mean(0)
print(mcmeans)
print(mcmeans[0]/6., 1-mcmeans[1]/4.)
print(mcmeans[:4], mcmeans[-4:])
if 'randmvn' in examples:
rvsmvn = randmvn(0.8, (5000,5))
print(np.corrcoef(rvsmvn, rowvar=0))
print(rvsmvn.var(0))
if 'tukeycrit' in examples:
print(get_tukeyQcrit(8, 8, alpha=0.05), 5.60)
print(get_tukeyQcrit(8, 8, alpha=0.01), 7.47)
if 'multicompdev' in examples:
#development of kruskal-wallis multiple-comparison
#example from matlab file exchange
X = np.array([[7.68, 1], [7.69, 1], [7.70, 1], [7.70, 1], [7.72, 1],
[7.73, 1], [7.73, 1], [7.76, 1], [7.71, 2], [7.73, 2],
[7.74, 2], [7.74, 2], [7.78, 2], [7.78, 2], [7.80, 2],
[7.81, 2], [7.74, 3], [7.75, 3], [7.77, 3], [7.78, 3],
[7.80, 3], [7.81, 3], [7.84, 3], [7.71, 4], [7.71, 4],
[7.74, 4], [7.79, 4], [7.81, 4], [7.85, 4], [7.87, 4],
[7.91, 4]])
xli = [X[X[:,1]==k,0] for k in range(1,5)]
xranks = stats.rankdata(X[:,0])
xranksli = [xranks[X[:,1]==k] for k in range(1,5)]
xnobs = np.array([len(x) for x in xli])
meanranks = [item.mean() for item in xranksli]
sumranks = [item.sum() for item in xranksli]
# equivalent function
#from scipy import special
#-np.sqrt(2.)*special.erfcinv(2-0.5) == stats.norm.isf(0.25)
stats.norm.sf(0.67448975019608171)
stats.norm.isf(0.25)
mrs = np.sort(meanranks)
v1, v2 = np.triu_indices(4,1)
print('\nsorted rank differences')
print(mrs[v2] - mrs[v1])
diffidx = np.argsort(mrs[v2] - mrs[v1])[::-1]
mrs[v2[diffidx]] - mrs[v1[diffidx]]
print('\nkruskal for all pairs')
for i,j in zip(v2[diffidx], v1[diffidx]):
print(i,j, stats.kruskal(xli[i], xli[j]))
mwu, mwupval = stats.mannwhitneyu(xli[i], xli[j], use_continuity=False)
print(mwu, mwupval*2, mwupval*2<0.05/6., mwupval*2<0.1/6.)
uni, intlab = np.unique(X[:,0], return_inverse=True)
groupnobs = np.bincount(intlab)
groupxsum = np.bincount(intlab, weights=X[:,0])
groupxmean = groupxsum * 1.0 / groupnobs
rankraw = X[:,0].argsort().argsort()
groupranksum = np.bincount(intlab, weights=rankraw)
# start at 1 for stats.rankdata :
grouprankmean = groupranksum * 1.0 / groupnobs + 1
assert_almost_equal(grouprankmean[intlab], stats.rankdata(X[:,0]), 15)
gs = GroupsStats(X, useranks=True)
print('\ngroupmeanfilter and grouprankmeans')
print(gs.groupmeanfilter)
print(grouprankmean[intlab])
#the following has changed
#assert_almost_equal(gs.groupmeanfilter, stats.rankdata(X[:,0]), 15)
xuni, xintlab = np.unique(X[:,0], return_inverse=True)
gs2 = GroupsStats(np.column_stack([X[:,0], xintlab]), useranks=True)
#assert_almost_equal(gs2.groupmeanfilter, stats.rankdata(X[:,0]), 15)
rankbincount = np.bincount(xranks.astype(int))
nties = rankbincount[rankbincount > 1]
        ntot = float(len(xranks))
tiecorrection = 1 - (nties**3 - nties).sum()/(ntot**3 - ntot)
assert_almost_equal(tiecorrection, stats.tiecorrect(xranks),15)
print('\ntiecorrection for data and ranks')
print(tiecorrection)
print(tiecorrect(xranks))
tot = X.shape[0]
t=500 #168
f=(tot*(tot+1.)/12.)-(t/(6.*(tot-1.)))
f=(tot*(tot+1.)/12.)/stats.tiecorrect(xranks)
print('\npairs of mean rank differences')
for i,j in zip(v2[diffidx], v1[diffidx]):
#pdiff = np.abs(mrs[i] - mrs[j])
pdiff = np.abs(meanranks[i] - meanranks[j])
se = np.sqrt(f * np.sum(1./xnobs[[i,j]] )) #np.array([8,8]))) #Fixme groupnobs[[i,j]] ))
print(i,j, pdiff, se, pdiff/se, pdiff/se>2.6310)
multicomp = MultiComparison(*X.T)
multicomp.kruskal()
gsr = GroupsStats(X, useranks=True)
print('\nexamples for kruskal multicomparison')
for i in range(10):
x1, x2 = (np.random.randn(30,2) + np.array([0, 0.5])).T
skw = stats.kruskal(x1, x2)
mc2=MultiComparison(np.r_[x1, x2], np.r_[np.zeros(len(x1)), np.ones(len(x2))])
newskw = mc2.kruskal()
print(skw, np.sqrt(skw[0]), skw[1]-newskw, (newskw/skw[1]-1)*100)
tablett, restt, arrtt = multicomp.allpairtest(stats.ttest_ind)
tablemw, resmw, arrmw = multicomp.allpairtest(stats.mannwhitneyu)
print('')
print(tablett)
print('')
print(tablemw)
tablemwhs, resmw, arrmw = multicomp.allpairtest(stats.mannwhitneyu, method='hs')
print('')
print(tablemwhs)
if 'last' in examples:
xli = (np.random.randn(60,4) + np.array([0, 0, 0.5, 0.5])).T
#Xrvs = np.array(catstack(xli))
xrvs, xrvsgr = catstack(xli)
multicompr = MultiComparison(xrvs, xrvsgr)
tablett, restt, arrtt = multicompr.allpairtest(stats.ttest_ind)
print(tablett)
xli=[[8,10,9,10,9],[7,8,5,8,5],[4,8,7,5,7]]
x,l = catstack(xli)
gs4 = GroupsStats(np.column_stack([x,l]))
print(gs4.groupvarwithin())
#test_tukeyhsd() #moved to test_multi.py
gmeans = np.array([ 7.71375, 7.76125, 7.78428571, 7.79875])
gnobs = np.array([8, 8, 7, 8])
sd = StepDown(gmeans, gnobs, 0.001, [27])
#example from BKY
pvals = [0.0001, 0.0004, 0.0019, 0.0095, 0.0201, 0.0278, 0.0298, 0.0344, 0.0459,
0.3240, 0.4262, 0.5719, 0.6528, 0.7590, 1.000 ]
#same number of rejection as in BKY paper:
#single step-up:4, two-stage:8, iterated two-step:9
#also alpha_star is the same as theirs for TST
print(fdrcorrection0(pvals, alpha=0.05, method='indep'))
print(fdrcorrection_twostage(pvals, alpha=0.05, iter=False))
res_tst = fdrcorrection_twostage(pvals, alpha=0.05, iter=False)
assert_almost_equal([0.047619, 0.0649], res_tst[-1][:2],3) #alpha_star for stage 2
assert_equal(8, res_tst[0].sum())
print(fdrcorrection_twostage(pvals, alpha=0.05, iter=True))
print('fdr_gbs', multipletests(pvals, alpha=0.05, method='fdr_gbs'))
#multicontrast_pvalues(tstat, tcorr, df)
test_tukey_pvalues()
tukey_pvalues(3.649, 3, 16)
|
bsd-3-clause
|
moonbury/notebooks
|
github/MasteringMLWithScikit-learn/8365OS_04_Codes/ch43.py
|
3
|
1642
|
"""
"""
__author__ = 'gavin'
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics import precision_score, recall_score, roc_auc_score, auc, confusion_matrix
import numpy as np
from scipy.sparse import hstack
blacklist = [l.strip() for l in open('insults/blacklist.csv', 'rb')]
def get_counts(documents):
return np.array([np.sum([c.lower().count(w) for w in blacklist]) for c in documents])
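# Editor's note: illustrative sketch, not part of the original script; with a
# hypothetical blacklist of ['idiot', 'moron'] the counts would be:
#     >>> get_counts(['You idiot', 'have a nice day'])
#     array([1, 0])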
# Note that I cleaned the training data by replacing """ with "
train_df = pd.read_csv('insults/train.csv')
X_train_raw, X_test_raw, y_train, y_test = train_test_split(train_df['Comment'], train_df['Insult'])
vectorizer = TfidfVectorizer(max_features=2000, norm='l2', max_df=0.04,
ngram_range=(1, 1), stop_words='english', use_idf=False)
X_train = vectorizer.fit_transform(X_train_raw)
#X_train_counts = get_counts(X_train_raw)
#X_train = hstack((X_train, X_train_counts.reshape(len(X_train_counts), 1)))
X_test = vectorizer.transform(X_test_raw)
#X_test_counts = get_counts(X_test_raw)
#X_test = hstack((X_test, X_test_counts.reshape(len(X_test_counts), 1)))
classifier = LogisticRegression(penalty='l1', C=2)
classifier.fit_transform(X_train, y_train)
predictions = classifier.predict(X_test)
print 'accuracy', classifier.score(X_test, y_test)
print 'precision', precision_score(y_test, predictions)
print 'recall', recall_score(y_test, predictions)
print 'auc', roc_auc_score(y_test, predictions)
print confusion_matrix(y_true=y_test, y_pred=predictions)
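# --- Optional: cross-validated estimate of the same pipeline. This is an
# illustrative sketch, not part of the original script; it reuses train_df
# from above and assumes the same 'Comment'/'Insult' columns. Scores from
# 5-fold CV will differ from the single train/test split reported above.
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import cross_val_score
pipeline = Pipeline([
    ('vect', TfidfVectorizer(max_features=2000, norm='l2', max_df=0.04,
                             ngram_range=(1, 1), stop_words='english',
                             use_idf=False)),
    ('clf', LogisticRegression(penalty='l1', C=2)),
])
cv_scores = cross_val_score(pipeline, train_df['Comment'].values,
                            train_df['Insult'].values, cv=5, scoring='roc_auc')
print('cross-validated AUC: %0.3f +/- %0.3f' % (cv_scores.mean(), cv_scores.std()))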
|
gpl-3.0
|
olologin/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
55
|
7386
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
 also known as the Forest Small Rice Rat, a rodent that lives in
 Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
bsd-3-clause
|
matthiasdiener/spack
|
var/spack/repos/builtin/packages/julia/package.py
|
3
|
10019
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import sys
class Julia(Package):
"""The Julia Language: A fresh approach to technical computing"""
homepage = "http://julialang.org"
url = "https://github.com/JuliaLang/julia/releases/download/v0.4.3/julia-0.4.3-full.tar.gz"
version('master',
git='https://github.com/JuliaLang/julia.git', branch='master')
version('release-0.5',
git='https://github.com/JuliaLang/julia.git', branch='release-0.5')
version('0.6.2', '255d80bc8d56d5f059fe18f0798e32f6')
version('0.5.2', '8c3fff150a6f96cf0536fb3b4eaa5cbb')
version('0.5.1', 'bce119b98f274e0f07ce01498c463ad5')
version('0.5.0', 'b61385671ba74767ab452363c43131fb')
version('release-0.4',
git='https://github.com/JuliaLang/julia.git', branch='release-0.4')
version('0.4.7', '75a7a7dd882b7840829d8f165e9b9078')
version('0.4.6', 'd88db18c579049c23ab8ef427ccedf5d')
version('0.4.5', '69141ff5aa6cee7c0ec8c85a34aa49a6')
version('0.4.3', '8a4a59fd335b05090dd1ebefbbe5aaac')
# TODO: Split these out into jl-hdf5, jl-mpi packages etc.
variant("cxx", default=False, description="Prepare for Julia Cxx package")
variant("hdf5", default=False, description="Install Julia HDF5 package")
variant("mpi", default=True, description="Install Julia MPI package")
variant("plot", default=False,
description="Install Julia plotting packages")
variant("python", default=False,
description="Install Julia Python package")
variant("simd", default=False, description="Install Julia SIMD package")
patch('gc.patch', when='@0.4:0.4.5')
patch('openblas.patch', when='@0.4:0.4.5')
variant('binutils', default=sys.platform != 'darwin',
description="Build via binutils")
# Build-time dependencies:
# depends_on("awk")
depends_on("m4", type="build")
# depends_on("pkgconfig")
# Combined build-time and run-time dependencies:
# (Yes, these are run-time dependencies used by Julia's package manager.)
depends_on("binutils", when='+binutils')
depends_on("cmake @2.8:")
depends_on("curl")
depends_on("git", when='@:0.4')
depends_on("git", when='@release-0.4')
depends_on("openssl")
depends_on("[email protected]:2.8")
# Run-time dependencies:
# depends_on("arpack")
# depends_on("fftw +float")
# depends_on("gmp")
# depends_on("libgit")
# depends_on("mpfr")
# depends_on("openblas")
# depends_on("pcre2")
# ARPACK: Requires BLAS and LAPACK; needs to use the same version
# as Julia.
# BLAS and LAPACK: Julia prefers 64-bit versions on 64-bit
# systems. OpenBLAS has an option for this; make it available as
# variant.
# FFTW: Something doesn't work when using a pre-installed FFTW
# library; need to investigate.
    # GMP, MPFR: Something doesn't work when using pre-installed GMP or
    # MPFR libraries; need to investigate.
# LLVM: Julia works only with specific versions, and might require
# patches. Thus we let Julia install its own LLVM.
# Other possible dependencies:
# USE_SYSTEM_OPENLIBM=0
# USE_SYSTEM_OPENSPECFUN=0
# USE_SYSTEM_DSFMT=0
# USE_SYSTEM_SUITESPARSE=0
# USE_SYSTEM_UTF8PROC=0
# USE_SYSTEM_LIBGIT2=0
# Run-time dependencies for Julia packages:
depends_on("hdf5", when="+hdf5", type="run")
depends_on("mpi", when="+mpi", type="run")
depends_on("py-matplotlib", when="+plot", type="run")
def install(self, spec, prefix):
# Julia needs git tags
if os.path.isfile(".git/shallow"):
git = which("git")
git("fetch", "--unshallow")
# Explicitly setting CC, CXX, or FC breaks building libuv, one
# of Julia's dependencies. This might be a Darwin-specific
# problem. Given how Spack sets up compilers, Julia should
# still use Spack's compilers, even if we don't specify them
# explicitly.
options = [
# "CC=cc",
# "CXX=c++",
# "FC=fc",
# "USE_SYSTEM_ARPACK=1",
"override USE_SYSTEM_CURL=1",
# "USE_SYSTEM_FFTW=1",
# "USE_SYSTEM_GMP=1",
# "USE_SYSTEM_MPFR=1",
# "USE_SYSTEM_PCRE=1",
"prefix=%s" % prefix]
if "+cxx" in spec:
if "@master" not in spec:
raise InstallError(
"Variant +cxx requires the @master version of Julia")
options += [
"BUILD_LLVM_CLANG=1",
"LLVM_ASSERTIONS=1",
"USE_LLVM_SHLIB=1"]
with open('Make.user', 'w') as f:
f.write('\n'.join(options) + '\n')
make()
make("install")
# Julia's package manager needs a certificate
cacert_dir = join_path(prefix, "etc", "curl")
mkdirp(cacert_dir)
cacert_file = join_path(cacert_dir, "cacert.pem")
curl = which("curl")
curl("--create-dirs",
"--output", cacert_file,
"https://curl.haxx.se/ca/cacert.pem")
# Put Julia's compiler cache into a private directory
cachedir = join_path(prefix, "var", "julia", "cache")
mkdirp(cachedir)
# Store Julia packages in a private directory
pkgdir = join_path(prefix, "var", "julia", "pkg")
mkdirp(pkgdir)
# Configure Julia
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
if "@master" in spec or "@release-0.5" in spec or "@0.5:" in spec:
# This is required for versions @0.5:
juliarc.write(
'# Point package manager to working certificates\n')
juliarc.write('LibGit2.set_ssl_cert_locations("%s")\n' %
cacert_file)
juliarc.write('\n')
juliarc.write('# Put compiler cache into a private directory\n')
juliarc.write('empty!(Base.LOAD_CACHE_PATH)\n')
juliarc.write('unshift!(Base.LOAD_CACHE_PATH, "%s")\n' % cachedir)
juliarc.write('\n')
juliarc.write('# Put Julia packages into a private directory\n')
juliarc.write('ENV["JULIA_PKGDIR"] = "%s"\n' % pkgdir)
juliarc.write('\n')
# Install some commonly used packages
julia = spec['julia'].command
julia("-e", 'Pkg.init(); Pkg.update()')
# Install HDF5
if "+hdf5" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# HDF5\n')
juliarc.write('push!(Libdl.DL_LOAD_PATH, "%s")\n' %
spec["hdf5"].prefix.lib)
juliarc.write('\n')
julia("-e", 'Pkg.add("HDF5"); using HDF5')
julia("-e", 'Pkg.add("JLD"); using JLD')
# Install MPI
if "+mpi" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# MPI\n')
juliarc.write('ENV["JULIA_MPI_C_COMPILER"] = "%s"\n' %
join_path(spec["mpi"].prefix.bin, "mpicc"))
juliarc.write('ENV["JULIA_MPI_Fortran_COMPILER"] = "%s"\n' %
join_path(spec["mpi"].prefix.bin, "mpifort"))
juliarc.write('\n')
julia("-e", 'Pkg.add("MPI"); using MPI')
# Install Python
if "+python" in spec or "+plot" in spec:
with open(join_path(prefix, "etc", "julia", "juliarc.jl"),
"a") as juliarc:
juliarc.write('# Python\n')
juliarc.write('ENV["PYTHON"] = "%s"\n' % spec["python"].home)
juliarc.write('\n')
# Python's OpenSSL package installer complains:
# Error: PREFIX too long: 166 characters, but only 128 allowed
# Error: post-link failed for: openssl-1.0.2g-0
julia("-e", 'Pkg.add("PyCall"); using PyCall')
if "+plot" in spec:
julia("-e", 'Pkg.add("PyPlot"); using PyPlot')
julia("-e", 'Pkg.add("Colors"); using Colors')
# These require maybe gtk and image-magick
julia("-e", 'Pkg.add("Plots"); using Plots')
julia("-e", 'Pkg.add("PlotRecipes"); using PlotRecipes')
julia("-e", 'Pkg.add("UnicodePlots"); using UnicodePlots')
julia("-e", """\
using Plots
using UnicodePlots
unicodeplots()
plot(x->sin(x)*cos(x), linspace(0, 2pi))
""")
# Install SIMD
if "+simd" in spec:
julia("-e", 'Pkg.add("SIMD"); using SIMD')
julia("-e", 'Pkg.status()')
|
lgpl-2.1
|
toobaz/pandas
|
pandas/tests/extension/base/__init__.py
|
2
|
2121
|
"""Base test suite for extension arrays.
These tests are intended for third-party libraries to subclass to validate
that their extension arrays and dtypes satisfy the interface. Moving or
renaming the tests should not be done lightly.
Libraries are expected to implement a few pytest fixtures to provide data
for the tests. The fixtures may be located in either
* The same module as your test class.
* A ``conftest.py`` in the same directory as your test class.
The full list of fixtures may be found in the ``conftest.py`` next to this
file.
.. code-block:: python
import pytest
from pandas.tests.extension.base import BaseDtypeTests
@pytest.fixture
def dtype():
return MyDtype()
class TestMyDtype(BaseDtypeTests):
pass
Your class ``TestMyDtype`` will inherit all the tests defined on
``BaseDtypeTests``. pytest's fixture discovery will supply your ``dtype``
wherever the test requires it. You're free to implement additional tests.
All the tests in these modules use ``self.assert_frame_equal`` or
``self.assert_series_equal`` for dataframe or series comparisons. By default,
they use the usual ``pandas.testing.assert_frame_equal`` and
``pandas.testing.assert_series_equal``. You can override the checks used
by defining the staticmethods ``assert_frame_equal`` and
``assert_series_equal`` on your base test class.
"""
from .casting import BaseCastingTests # noqa
from .constructors import BaseConstructorsTests # noqa
from .dtype import BaseDtypeTests # noqa
from .getitem import BaseGetitemTests # noqa
from .groupby import BaseGroupbyTests # noqa
from .interface import BaseInterfaceTests # noqa
from .io import BaseParsingTests # noqa
from .methods import BaseMethodsTests # noqa
from .missing import BaseMissingTests # noqa
from .ops import BaseArithmeticOpsTests, BaseComparisonOpsTests, BaseOpsUtil # noqa
from .printing import BasePrintingTests # noqa
from .reduce import ( # noqa
BaseBooleanReduceTests,
BaseNoReduceTests,
BaseNumericReduceTests,
)
from .reshaping import BaseReshapingTests # noqa
from .setitem import BaseSetitemTests # noqa
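# Illustrative sketch (not part of this package): a third-party library would
# typically combine the mixins above in its own test module and may override
# the assertion helpers, e.g. to relax dtype checks for its extension array.
# ``MyDtype`` below is a placeholder for the library's own dtype class.
#
#     import pytest
#     import pandas.util.testing as tm
#     from pandas.tests.extension.base import BaseMethodsTests
#
#     @pytest.fixture
#     def dtype():
#         return MyDtype()
#
#     class TestMyMethods(BaseMethodsTests):
#         @staticmethod
#         def assert_series_equal(left, right, *args, **kwargs):
#             kwargs.setdefault('check_dtype', False)
#             tm.assert_series_equal(left, right, *args, **kwargs)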
|
bsd-3-clause
|
Chilipp/nc2map
|
mapos/_lineplot_old.py
|
1
|
9432
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import numpy as np
from collections import OrderedDict
from copy import deepcopy
from itertools import izip
from difflib import get_close_matches
from _xyplot import XyPlot
from _simple_plot import SimplePlot
class LinePlot(SimplePlot):
"""class to plot lineplots"""
def __init__(self, ydata, xdata=None, fmt={}, name='line', ax=None,
mapsin=None, meta={}):
"""Initialization method of LinePlot class
Input:
- ydata: Dictionary or array. y-data for the plot. Can be one of the
following types:
-- array with shape (n, m) where n is the number of lines and m
the number of data points per line (Note: m has not to be
the same for all lines)
-- dictionary {'label': [y1, y2, ...],
'label2': [...]}
where the keys (e.g. 'label') is the label for the legend
and the value the 1-dimensional data array for each line.
-- dictionary {'label': {'data': [y1, y2, ...],
'fill': fill values for error range,
'fill...': value for fill_between
any other key-value pair for plt.plot
},
'label2': {...}, ...}
As an explanation:
--- Each subdictionary of ydata will be interpreted as one
line.
--- 'label' is the key which will then be used in the
legend (if not 'label' is set in the inner dictionary)
--- The 'data' key stands for the one dimensional array
for the plot.
--- The 'fill' value indicates the data used for the
error range.
--- keywords starting with 'fill' will be used for the
fill_between method (e.g. fillalpha=0.5 will result
in plt.fill_between(..., alpha = 0.5)
--- any other key-value pair (e.g. color='r') will be used
in the plotting call (i.e. plt.plot(..., color='r')
for this line
- xdata: Dictionary, Array or None.
-- If None, the xdata will be range(n) where n is the length of
the corresponding ydata.
-- If one-dimensional array, this array will be used for all
ydata lines
-- The rest is the same as for ydata
- fmt: Dictionary with formatoption keys for XyFmt class (see below)
- name: name of the line plot instance
- ax: Axes where to plot on. If None, a new axes will be created
- mapsin: nc2map.MapsManager instance the line plot belongs to
- meta: Dictionary containing meta informations that can be used for
texts (e.g. title, etc.)
"""
super(LinePlot, self).__init__(name=name, mapsin=mapsin, ax=ax,
fmt=fmt, meta=meta)
xdata, ydata = self._setlines(xdata, ydata, fmt=fmt)
self.xdata = xdata
self.ydata = ydata
self.lines = {}
self.make_plot()
def make_plot(self):
"""Method that makes the plot"""
# return if not enabled
if not self.fmt.enable:
return
ax = self.ax
plt.sca(ax)
color_cycle = ax._get_lines.color_cycle
if self.fmt.grid is not None:
plt.grid()
if self.fmt.xlim is None:
xlim = (min(min(val['data']) for key, val in self.xdata.items()),
max(max(val['data']) for key, val in self.xdata.items()))
else:
xlim = self.fmt.xlim
plt.xlim(*xlim)
if self.fmt.ylim is not None:
plt.ylim(self.fmt.ylim)
self.xlim = xlim
# make plotting
for line, data in self.ydata.items():
plotdata = data.pop('data')
# y fill data
try:
yfilldata = np.array(data.pop('fill'))
yfill_kwargs = {}
for key, val in data.items():
                    if key[:4] == 'fill':
yfill_kwargs[key[4:]] = data.pop(key)
except KeyError:
yfilldata = None
if yfilldata is not None and yfilldata.ndim == 1:
                yfilldata = np.array([plotdata - yfilldata,
                                      plotdata + yfilldata])
# x fill data
try:
xfilldata = np.array(self.xdata[line].pop('fill'))
xfill_kwargs = {}
                for key, val in self.xdata[line].items():
                    if key[:4] == 'fill':
                        xfill_kwargs[key[4:]] = self.xdata[line].pop(key)
except KeyError:
xfilldata = None
if xfilldata is not None and xfilldata.ndim == 1:
xfilldata = np.array([self.xdata[line]['data'] - xfilldata,
self.xdata[line]['data'] + xfilldata])
data.setdefault('label', line)
self.lines[line] = ax.plot(
self.xdata[line]['data'], plotdata, **data)
lcolor = self.lines[line][0].get_color()
if yfilldata is not None:
                yfill_kwargs.setdefault('color', lcolor)
self.lines[line].append(ax.fill_between(
self.xdata[line]['data'], yfilldata[0], yfilldata[1],
**yfill_kwargs))
if xfilldata is not None:
                xfill_kwargs.setdefault('color', lcolor)
self.lines[line].append(ax.fill_betweenx(
plotdata, xfilldata[0], xfilldata[1],
**xfill_kwargs))
if self.fmt.legend is not None:
self.legend = plt.legend(**self.fmt.legend)
self._configureaxes()
plt.draw()
def update(self, ydata={}, xdata={}, lines=None, **kwargs):
"""Update method of LinePlot class
Input:
- ydata: Dictionary {'label': {'key': 'val', ...},
'key': 'val', ...}
where 'label' may be one of the line labels and ('key', 'val')
any value pair which is also possible in __init__ method
If set in the outer dictionary (i.e. not in the inner 'label'
dictionary) they are considered as default items for all lines
- xdata: Dictionary (same structure as ydata)
- lines: List of strings. The strings must correspond to the 'labels'
of the lines as used in self.ydata.keys(). This defines the
which lines to update. If None, all lines will be updated.
Further keywords may be any formatoption keyword from the XyFmt class.
Note: To add new lines, use the addline method instead.
"""
self.ax.clear()
if hasattr(self, 'legend'):
self.legend.remove()
del self.legend
self.fmt.update(kwargs)
self.fmt.update(**{key: val for key, val in ydata.items() if key in
self.fmt._default})
if lines is None:
lines = self.ydata.keys()
for line in lines:
self.ydata[line].update(
{key: val for key, val in ydata.items() if key not in lines})
self.ydata[line].update(ydata.get(line, {}))
self.xdata[line].update(
{key: val for key, val in xdata.items() if key not in lines})
self.xdata[line].update(xdata.get(line, {}))
self.make_plot()
def _setlines(self, xdata, ydata, fmt=None):
if not isinstance(ydata, dict):
try:
iter(ydata[0])
except TypeError:
ydata = [ydata]
if not hasattr(self, 'ydata'):
n = 0
else:
n = len(self.lines)
ydata = OrderedDict([('line%i' % i, {'data': ydata[i-n]})
for i in xrange(n, len(ydata)+n)])
else:
for line, data in ydata.items():
if not isinstance(data, dict):
ydata[line] = {'data': data}
if xdata is None:
xdata = {key: {'data': range(len(val['data']))}
for key, val in ydata.items()}
elif not isinstance(xdata, dict):
keys = sorted(ydata.keys())
xdata = {key: {'data': xdata} for key in keys}
else:
for line, data in xdata.items():
if not isinstance(data, dict):
xdata[line] = {'data': data}
return xdata, ydata
def addline(self, ydata, xdata=None):
xdata, ydata = self._setlines(xdata, ydata)
self.xdata.update(xdata)
self.ydata.update(ydata)
self.ax.clear()
if hasattr(self, 'legend'):
self.legend.remove()
del self.legend
self.make_plot()
def show(self):
plt.show(block=False)
def close(self):
plt.close(self.ax.get_figure())
del self.xdata
del self.ydata
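# Minimal usage sketch (illustrative only; assumes the nc2map package and its
# formatoption class are importable, and that 'legend' is a valid formatoption
# key). It exercises the dictionary form of ``ydata`` described in the
# __init__ docstring above.
if __name__ == '__main__':
    t = np.arange(10.0)
    lp = LinePlot(
        ydata={'signal': {'data': np.sin(t), 'fill': 0.1 * np.ones_like(t),
                          'fillalpha': 0.3, 'color': 'r'},
               'trend': 0.1 * t},
        xdata=t,
        fmt={'legend': {'loc': 'best'}})
    lp.show()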
|
gpl-2.0
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/numpy/lib/polynomial.py
|
7
|
38749
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
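    # Multiply in one linear factor (x - r_k) per root; multiplying
    # polynomials corresponds to convolving their coefficient arrays.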
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
        Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
if len(x) <= order + 2:
raise ValueError("the number of data points must exceed order + 2 "
"for Bayesian estimate the covariance matrix")
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
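    # Horner's scheme: fold in one coefficient per iteration, so p(x) is
    # evaluated with len(p) multiply-adds and no explicit powers of x.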
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
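    # Synthetic (long) division: each pass fixes one quotient coefficient and
    # subtracts the scaled divisor from the running remainder.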
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
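    # Render the '**<power>' markers emitted by poly1d.__str__ as superscripts:
    # exponents are collected on one line and the polynomial body on the next,
    # wrapping both at `wrap` characters.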
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@coeffs.setter
def coeffs(self, value):
# allowing this makes p.coeffs *= 2 legal
if value is not self._coeffs:
raise AttributeError("Cannot set attribute")
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# our internal _coeffs property need to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
mit
|
luo66/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
268
|
1091
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='dashed')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
kiyoto/statsmodels
|
statsmodels/tsa/descriptivestats.py
|
33
|
2304
|
# -*- coding: utf-8 -*-
"""Descriptive Statistics for Time Series
Created on Sat Oct 30 14:24:08 2010
Author: josef-pktd
License: BSD(3clause)
"""
import numpy as np
from . import stattools as stt
#todo: check subclassing for descriptive stats classes
class TsaDescriptive(object):
'''collection of descriptive statistical methods for time series
'''
def __init__(self, data, label=None, name=''):
self.data = data
self.label = label
self.name = name
def filter(self, num, den):
from scipy.signal import lfilter
xfiltered = lfilter(num, den, self.data)
return self.__class__(xfiltered, self.label, self.name + '_filtered')
def detrend(self, order=1):
from . import tsatools
xdetrended = tsatools.detrend(self.data, order=order)
return self.__class__(xdetrended, self.label, self.name + '_detrended')
def fit(self, order=(1,0,1), **kwds):
from .arima_model import ARMA
self.mod = ARMA(self.data)
self.res = self.mod.fit(order=order, **kwds)
#self.estimated_process =
return self.res
def acf(self, nlags=40):
return stt.acf(self.data, nlags=nlags)
def pacf(self, nlags=40):
return stt.pacf(self.data, nlags=nlags)
def periodogram(self):
# doesn't return frequencies
return stt.periodogram(self.data)
# copied from fftarma.py
def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
data = self.data
acf = self.acf(nacf)
pacf = self.pacf(nacf)
w = np.linspace(0, np.pi, nfreq, endpoint=False)
spdr = self.periodogram()[:nfreq] #(w)
if fig is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(2,2,1)
namestr = ' for %s' % self.name if self.name else ''
ax.plot(data)
ax.set_title('Time series' + namestr)
ax = fig.add_subplot(2,2,2)
ax.plot(acf)
ax.set_title('Autocorrelation' + namestr)
ax = fig.add_subplot(2,2,3)
ax.plot(spdr) # (wr, spdr)
ax.set_title('Power Spectrum' + namestr)
ax = fig.add_subplot(2,2,4)
ax.plot(pacf)
ax.set_title('Partial Autocorrelation' + namestr)
return fig
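# A minimal usage sketch of the class above: build a toy AR(1) series and
# chain the descriptive helpers. The function name `_tsa_descriptive_example`
# is illustrative only and is not part of statsmodels.
def _tsa_descriptive_example():
    np.random.seed(0)
    e = np.random.randn(200)
    x = np.zeros(200)
    for t in range(1, 200):
        x[t] = 0.6 * x[t - 1] + e[t]
    desc = TsaDescriptive(x, name='ar1')
    detrended = desc.detrend(order=1)        # returns a new TsaDescriptive
    return desc.acf(nlags=10), detrended.pacf(nlags=10)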
|
bsd-3-clause
|
Nyker510/scikit-learn
|
sklearn/decomposition/dict_learning.py
|
83
|
44062
|
""" Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
raise ValueError('Sparse coding method must be "lasso_lars", '
'"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
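# A minimal usage sketch of `sparse_encode` on a random dictionary; the helper
# name `_sparse_encode_example` is illustrative and not part of the
# scikit-learn API. The dictionary rows are normalized here because several
# coding algorithms assume unit-norm atoms.
def _sparse_encode_example():
    rng = np.random.RandomState(0)
    D = rng.randn(15, 30)                      # (n_components, n_features)
    D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]
    X = rng.randn(10, 30)                      # (n_samples, n_features)
    code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
    # Each row of `code` has at most 3 non-zero coefficients, and
    # np.dot(code, D) approximates X.
    return code.shape                          # (10, 15)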
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
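# A small sanity-check sketch for `_update_dict`: after the in-place update
# every atom (column) of the dictionary has unit Euclidean norm. The helper
# name `_update_dict_example` is illustrative and not part of the public API.
def _update_dict_example():
    rng = np.random.RandomState(0)
    dictionary = np.asfortranarray(rng.randn(8, 5))   # (n_features, n_components)
    Y = rng.randn(8, 20)                              # (n_features, n_samples)
    code = rng.randn(5, 20)                           # (n_components, n_samples)
    dictionary = _update_dict(dictionary, Y, code, random_state=rng)
    return np.allclose((dictionary ** 2).sum(axis=0), 1.0)   # True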
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
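# A minimal usage sketch of `dict_learning` on random data; the helper name
# `_dict_learning_example` is illustrative and not part of the public API.
def _dict_learning_example():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 12)
    code, dictionary, errors = dict_learning(X, n_components=6, alpha=1,
                                             max_iter=20, random_state=rng)
    # code is (20, 6), dictionary is (6, 12), np.dot(code, dictionary)
    # approximates X, and `errors` holds the cost at each iteration.
    return code.shape, dictionary.shape, len(errors)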
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, the number of iterations returned should be zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
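# A minimal usage sketch of the online solver above; the helper name
# `_dict_learning_online_example` is illustrative, not part of the public API.
def _dict_learning_online_example():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 12)
    code, dictionary = dict_learning_online(X, n_components=6, alpha=1,
                                            n_iter=25, batch_size=5,
                                            random_state=rng)
    # With return_code=True (the default) both factors are returned and
    # np.dot(code, dictionary) approximates X.
    return code.shape, dictionary.shape        # (100, 6), (6, 12)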
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
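# A small standalone sketch of the `split_sign` post-processing used above:
# the code matrix is split into its positive and negative parts, which are
# then concatenated. The helper name `_split_sign_example` is illustrative.
def _split_sign_example():
    code = np.array([[1.5, -2.0, 0.0]])
    split_code = np.hstack([np.maximum(code, 0), -np.minimum(code, 0)])
    return split_code          # array([[1.5, 0. , 0. , 0. , 2. , 0. ]])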
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
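# A minimal usage sketch of `SparseCoder` with a precomputed dictionary; the
# helper name `_sparse_coder_example` is illustrative, not part of the API.
def _sparse_coder_example():
    rng = np.random.RandomState(0)
    D = rng.randn(10, 8)
    D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # unit-norm atoms
    X = rng.randn(5, 8)
    coder = SparseCoder(dictionary=D, transform_algorithm='omp',
                        transform_n_nonzero_coefs=2)
    code = coder.fit(X).transform(X)
    return code.shape                                   # (5, 10)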
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
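# A minimal usage sketch of `DictionaryLearning` on random data; the helper
# name `_dictionary_learning_example` is illustrative, not part of the API.
def _dictionary_learning_example():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 16)
    dico = DictionaryLearning(n_components=8, alpha=1, max_iter=20,
                              transform_algorithm='lasso_lars',
                              random_state=0)
    code = dico.fit(X).transform(X)
    # components_ is (8, 16); np.dot(code, dico.components_) approximates X.
    return code.shape, dico.components_.shape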
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
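# A minimal sketch of online fitting with `partial_fit` over mini-batches; the
# helper name `_minibatch_partial_fit_example` is illustrative only.
def _minibatch_partial_fit_example():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 16)
    dico = MiniBatchDictionaryLearning(n_components=8, alpha=1, n_iter=5,
                                       random_state=0)
    for batch in gen_batches(60, 10):
        dico.partial_fit(X[batch])     # inner_stats_ carry over between calls
    return dico.components_.shape      # (8, 16)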
|
bsd-3-clause
|
cybernet14/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check that PLSSVD doesn't return all possible components but just
# the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
bsd-3-clause
|
pnedunuri/scikit-learn
|
sklearn/neighbors/approximate.py
|
71
|
22357
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
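# A small worked sketch of the prefix search above, using 4-bit hashes for
# readability (the real index uses 32-bit hashes). The helper name
# `_prefix_match_example` is illustrative only.
def _prefix_match_example():
    tree = np.array([0b0001, 0b0100, 0b0101, 0b0111, 0b1100])  # sorted hashes
    bin_X = np.array([0b0110])                                  # query hash
    left_mask, right_mask = 0b1100, 0b0011                      # keep top 2 bits
    start, stop = _find_matching_indices(tree, bin_X, left_mask, right_mask)
    # tree[start[0]:stop[0]] are the hashes sharing the query's 2-bit prefix.
    return tree[start[0]:stop[0]]       # array([4, 5, 7])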
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value between 0 and 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# If too few candidates were gathered, fill the remainder uniformly
# from the indices that were not selected.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
Binary hashes of the input points are created by projecting them onto
the random hyperplanes of each hash function and keeping only the sign
(positive/negative) of each projection; the hashes of every tree are
then stored as a sorted array.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components` is the hash size and `n_features` is the data dimension.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, the value passed
to the constructor is used.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data points to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
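# --- Illustrative usage sketch (not part of scikit-learn) ---------------------
# A minimal, hedged example of the incremental workflow documented above:
# index a batch, query it, grow it with ``partial_fit`` and query again within
# a radius.  The data are random numbers; call the helper manually.
def _lsh_forest_demo():
    import numpy as np
    rng = np.random.RandomState(42)
    forest = LSHForest(random_state=42).fit(rng.rand(20, 5))
    queries = rng.rand(2, 5)
    # approximate k-nearest neighbours (cosine distance), smallest first
    dist, ind = forest.kneighbors(queries, n_neighbors=3)
    # extend the index without re-hashing the points already stored
    forest.partial_fit(rng.rand(5, 5))
    # all approximate neighbours within a cosine-distance radius
    radius_dist, radius_ind = forest.radius_neighbors(queries, radius=0.3)
    return dist, ind, radius_dist, radius_ind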
|
bsd-3-clause
|
nguyentu1602/statsmodels
|
statsmodels/tsa/filters/hp_filter.py
|
27
|
3507
|
from __future__ import absolute_import
from scipy import sparse
from scipy.sparse import dia_matrix, eye as speye
from scipy.sparse.linalg import spsolve
import numpy as np
from ._utils import _maybe_get_pandas_wrapper
def hpfilter(X, lamb=1600):
"""
Hodrick-Prescott filter
Parameters
----------
X : array-like
The 1-d time series to filter, of shape (nobs,) or (nobs, 1)
lamb : float
The Hodrick-Prescott smoothing parameter. A value of 1600 is
suggested for quarterly data. Ravn and Uhlig suggest using a value
of 6.25 (1600/4**4) for annual data and 129600 (1600*3**4) for monthly
data.
Returns
-------
cycle : array
The estimated cycle in the data given lamb.
trend : array
The estimated trend in the data given lamb.
Examples
---------
>>> import statsmodels.api as sm
>>> import pandas as pd
>>> dta = sm.datasets.macrodata.load_pandas().data
>>> dates = sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3')
>>> index = pd.DatetimeIndex(dates)
>>> dta.set_index(index, inplace=True)
>>> cycle, trend = sm.tsa.filters.hpfilter(dta.realgdp, 1600)
>>> gdp_decomp = dta[['realgdp']]
>>> gdp_decomp["cycle"] = cycle
>>> gdp_decomp["trend"] = trend
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax,
... fontsize=16);
>>> plt.show()
.. plot:: plots/hpf_plot.py
Notes
-----
The HP filter removes a smooth trend, `T`, from the data `X` by solving
min_T sum_t ((X[t] - T[t])**2 + lamb*((T[t+1] - T[t]) - (T[t] - T[t-1]))**2)
Here we implemented the HP filter as a ridge-regression rule using
scipy.sparse. In this sense, the solution can be written as
T = inv(I + lamb*K'K)X
where I is a nobs x nobs identity matrix, and K is a (nobs-2) x nobs matrix
such that
K[i,j] = 1 if j == i or j == i + 2
K[i,j] = -2 if j == i + 1
K[i,j] = 0 otherwise
References
----------
Hodrick, R.J, and E. C. Prescott. 1980. "Postwar U.S. Business Cycles: An
Empirical Investigation." `Carnegie Mellon University discussion
paper no. 451`.
Ravn, M.O and H. Uhlig. 2002. "On Adjusting the Hodrick-Prescott
Filter for the Frequency of Observations." `The Review of Economics and
Statistics`, 84(2), 371-80.
"""
_pandas_wrapper = _maybe_get_pandas_wrapper(X)
X = np.asarray(X, float)
if X.ndim > 1:
X = X.squeeze()
nobs = len(X)
I = speye(nobs,nobs)
offsets = np.array([0,1,2])
data = np.repeat([[1.],[-2.],[1.]], nobs, axis=1)
K = dia_matrix((data, offsets), shape=(nobs-2,nobs))
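# Row i of K holds (1, -2, 1) at columns (i, i+1, i+2), so K.dot(T) returns the
# second differences of the trend that the penalty term smooths.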
import scipy
if (X.dtype != np.dtype('<f8') and
int(scipy.__version__[:3].split('.')[1]) < 11):
#scipy umfpack bug on Big Endian machines, will be fixed in 0.11
use_umfpack = False
else:
use_umfpack = True
if scipy.__version__[:3] == '0.7':
#doesn't have use_umfpack option
#will be broken on big-endian machines with scipy 0.7 and umfpack
trend = spsolve(I+lamb*K.T.dot(K), X)
else:
trend = spsolve(I+lamb*K.T.dot(K), X, use_umfpack=use_umfpack)
cycle = X-trend
if _pandas_wrapper is not None:
return _pandas_wrapper(cycle), _pandas_wrapper(trend)
return cycle, trend
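# --- Illustrative check (not part of statsmodels) ------------------------------
# A minimal dense-NumPy sketch of the ridge form described in the docstring:
# the trend solves (I + lamb*K'K) T = X, with row i of K holding (1, -2, 1) at
# columns (i, i+1, i+2).  The numbers below are made up for illustration.
def _hp_dense_demo(lamb=1600.0):
    x = np.array([1.0, 2.0, 4.0, 7.0, 11.0, 16.0, 22.0, 29.0])
    nobs = len(x)
    K = np.zeros((nobs - 2, nobs))
    for i in range(nobs - 2):
        K[i, i], K[i, i + 1], K[i, i + 2] = 1.0, -2.0, 1.0
    trend = np.linalg.solve(np.eye(nobs) + lamb * K.T.dot(K), x)
    cycle = x - trend
    return cycle, trend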
|
bsd-3-clause
|
BlackArbsCEO/trading-with-python
|
lib/classes.py
|
76
|
7847
|
"""
worker classes
@author: Jev Kuznetsov
Licence: GPL v2
"""
__docformat__ = 'restructuredtext'
import os
import logger as logger
import yahooFinance as yahoo
from functions import returns, rank
from datetime import date
from pandas import DataFrame, Series
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Symbol(object):
'''
Symbol class, the foundation of Trading With Python library,
This class acts as an interface to Yahoo data, Interactive Brokers etc
'''
def __init__(self,name):
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('class created.')
self.dataDir = os.getenv("USERPROFILE")+'\\twpData\\symbols\\'+self.name
self.log.debug('Data dir:'+self.dataDir)
self.ohlc = None # historic OHLC data
def downloadHistData(self, startDate=(2010,1,1),endDate=date.today().timetuple()[:3],\
source = 'yahoo'):
'''
get historical OHLC data from a data source (yahoo is default)
startDate and endDate are tuples in form (y,m,d)
'''
self.log.debug('Getting OHLC data')
self.ohlc = yahoo.getHistoricData(self.name,startDate,endDate)
def histData(self,column='adj_close'):
'''
Return a column of historic data.
Returns
-------------
df : DataFrame
'''
s = self.ohlc[column]
return DataFrame(s.values,s.index,[self.name])
@property
def dayReturns(self):
''' close-close returns '''
return (self.ohlc['adj_close']/self.ohlc['adj_close'].shift(1)-1)
#return DataFrame(s.values,s.index,[self.name])
class Portfolio(object):
def __init__(self,histPrice,name=''):
"""
Constructor
Parameters
----------
histPrice : historic price
"""
self.histPrice = histPrice
self.params = DataFrame(index=self.symbols)
self.params['capital'] = 100*np.ones(self.histPrice.shape[1],dtype=np.float)
self.params['last'] = self.histPrice.tail(1).T.ix[:,0]
self.params['shares'] = self.params['capital']/self.params['last']
self.name= name
def setHistPrice(self,histPrice):
self.histPrice = histPrice
def setShares(self,shares):
""" set number of shares, adjust capital
shares: list, np array or Series
"""
if len(shares) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['shares'] = shares
self.params['capital'] = self.params['shares']*self.params['last']
def setCapital(self,capital):
""" Set target captial, adjust number of shares """
if len(capital) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['capital'] = capital
self.params['shares'] = self.params['capital']/self.params['last']
def calculateStatistics(self,other=None):
''' calculate spread statistics, save internally '''
res = {}
res['micro'] = rank(self.returns[-1],self.returns)
res['macro'] = rank(self.value[-1], self.value)
res['last'] = self.value[-1]
if other is not None:
res['corr'] = self.returns.corr(returns(other))
return Series(res,name=self.name)
@property
def symbols(self):
return self.histPrice.columns.tolist()
@property
def returns(self):
return (returns(self.histPrice)*self.params['capital']).sum(axis=1)
@property
def value(self):
return (self.histPrice*self.params['shares']).sum(axis=1)
def __repr__(self):
return ("Portfolio %s \n" % self.name ) + str(self.params)
#return ('Spread %s :' % self.name ) + str.join(',',
# ['%s*%.2f' % t for t in zip(self.symbols,self.capital)])
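# --- Illustrative usage sketch (not part of the library) -----------------------
# Hedged example of the Portfolio bookkeeping above: each symbol starts with a
# notional capital of 100 and shares follow from the last observed price.
# It assumes the legacy pandas API (``.ix``) that this module already relies
# on; the prices and names below are made up.
def _portfolio_demo():
    hist = DataFrame({'AAA': [10.0, 11.0, 12.0],
                      'BBB': [20.0, 19.0, 21.0]})
    p = Portfolio(hist, name='demo')
    p.setCapital([300.0, 150.0])   # rescale the target capital per symbol
    return p.params[['capital', 'last', 'shares']], p.value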
class Spread(object):
'''
Spread class, used to build a spread out of two symbols.
'''
def __init__(self,stock,hedge,beta=None):
''' init with symbols or price series '''
if isinstance(stock,str) and isinstance(hedge,str):
self.symbols = [stock,hedge]
self._getYahooData()
elif isinstance(stock,pd.Series) and isinstance(hedge,pd.Series):
self.symbols = [stock.name,hedge.name]
self.price = pd.DataFrame(dict(zip(self.symbols,[stock,hedge]))).dropna()
else:
raise ValueError('Both stock and hedge should be of the same type, symbol string or Series')
# calculate returns
self.returns = self.price.pct_change()
if beta is not None:
self.beta = beta
else:
self.estimateBeta()
# set data
self.data = pd.DataFrame(index = self.symbols)
self.data['beta'] = pd.Series({self.symbols[0]:1., self.symbols[1]:-self.beta})
def calculateShares(self,bet):
''' set number of shares based on last quote '''
if 'price' not in self.data.columns:
print 'Getting quote...'
self.getQuote()
self.data['shares'] = bet*self.data['beta']/self.data['price']
def estimateBeta(self,plotOn=False):
""" linear estimation of beta """
x = self.returns[self.symbols[1]] # hedge
y = self.returns[self.symbols[0]] # stock
#avoid extremes
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
if plotOn:
plt.plot(x,y,'o')
plt.grid(True)
iteration = 1
nrOutliers = 1
while iteration < 3 and nrOutliers > 0 :
(a,b) = np.polyfit(x,y,1)
yf = np.polyval([a,b],x)
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
if plotOn:
yf = x*beta
plt.plot(x,yf,'-',color='red')
plt.xlabel(self.symbols[1])
plt.ylabel(self.symbols[0])
self.beta = beta
return beta
@property
def spread(self):
''' return daily returns of the pair '''
return (self.returns*self.data['beta']).sum(1)
def getQuote(self):
''' get current quote from yahoo '''
q = yahoo.getQuote(self.symbols)
self.data['price'] = q['last']
def _getYahooData(self, startDate=(2007,1,1)):
""" fetch historic data """
data = {}
for symbol in self.symbols:
print 'Downloading %s' % symbol
data[symbol]=(yahoo.getHistoricData(symbol,sDate=startDate)['adj_close'] )
self.price = pd.DataFrame(data).dropna()
def __repr__(self):
return 'Spread 1*%s & %.2f*%s ' % (self.symbols[0],-self.beta,self.symbols[1])
@property
def name(self):
return str.join('_',self.symbols)
if __name__=='__main__':
s = Spread('SPY', 'IWM')
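# --- Illustrative sketch (not part of the library) -----------------------------
# The core of Spread.estimateBeta above, rewritten as a standalone function on
# synthetic returns: trim the extreme hedge returns, then repeat a linear fit
# while discarding 3-sigma outliers.  Data and the "true" beta are made up.
def _robust_beta_demo():
    rng = np.random.RandomState(0)
    x = rng.normal(scale=0.01, size=500)               # hedge returns
    y = 0.8 * x + rng.normal(scale=0.002, size=500)    # stock returns, beta ~ 0.8
    low, high = np.percentile(x, 20), np.percentile(x, 80)
    keep = (x > low) & (x < high)                      # drop the extremes
    x, y = x[keep], y[keep]
    beta = np.nan
    for _ in range(2):                                 # two trimming passes
        a, b = np.polyfit(x, y, 1)
        err = np.polyval([a, b], x) - y
        inliers = np.abs(err) <= 3 * np.std(err)
        beta = a
        x, y = x[inliers], y[inliers]
    return beta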
|
bsd-3-clause
|
hfutsuchao/Python2.6
|
stocks/strategey_classes/stock_tech_corr.py
|
1
|
11317
|
#coding:utf-8
import pandas as pd
import numpy as np
import talib
import cral_CNstock_order_ana
class StockTeckCorrStrategy(object):
def __init__(self,df,start_date='0',end_date='9',norm_type='character',quota_index=0,lost=1.0):
self.__df = df
self.__start_date = start_date
self.__end_date = end_date
self.__norm_type = norm_type
self.__quota_index = quota_index
self.__lost = lost
def get_quota(self):
#stock_amount = cral_CNstock_order_ana.main()
close = self.__df['close']
high_prices = self.__df['high'].values
low_prices = self.__df['low'].values
close_prices = close.values
ma5 = talib.MA(close_prices,5)
ma10 = talib.MA(close_prices,10)
ma20 = talib.MA(close_prices,20)
ma30 = talib.MA(close_prices,30)
K, D = talib.STOCH(high_prices,low_prices,close_prices, fastk_period=9, slowk_period=3)
J = K * 3 - D * 2
sar = talib.SAR(high_prices, low_prices, acceleration=0.05, maximum=0.2)
sar = pd.DataFrame(sar-close)
sar.index = self.__df.date
atr = talib.ATR(high_prices,low_prices,close_prices)
natr = talib.NATR(high_prices,low_prices,close_prices)
trange = talib.TRANGE(high_prices,low_prices,close_prices)
cci = talib.CCI(high_prices,low_prices,close_prices,14)
dif, dea, bar = talib.MACDFIX(close_prices)
bar = bar * 2
df_all = self.__df.drop(['code','open','low', 'high','volume'],axis=1).set_index('date')
df_all.insert(0,'ma5',ma5)
df_all.insert(0,'ma10',ma10)
df_all.insert(0,'ma20',ma20)
df_all.insert(0,'ma30',ma30)
df_all.insert(0,'K',K)
df_all.insert(0,'D',D)
df_all.insert(0,'J',J)
df_all.insert(0,'cci',cci)
df_all.insert(0,'bar',bar)
df_all.insert(0,'dif',dif)
df_all.insert(0,'dea',dea)
df_all.insert(0,'sar',sar)
#df_all = pd.concat([df_all,stock_amount],axis=1)
df_yesterday = df_all.T
index_c = df_all.index
added = [np.nan] * len(df_all.columns)
df_yesterday.insert(0, len(df_yesterday.columns), added)
df_yesterday = df_yesterday.T
df_yesterday = df_yesterday.drop(df_all.index[len(df_all.index)-1])
df_yesterday.insert(0, 'index_c', index_c)
df_yesterday = df_yesterday.set_index('index_c')
df_dif = df_all - df_yesterday
df_dif_close_plus_one_day = df_dif.copy()
for i in range(len(df_dif_close_plus_one_day['close'])-1):
df_dif_close_plus_one_day['close'][i] = df_dif_close_plus_one_day['close'][i+1]
df_dif_close_plus_one_day['close'][len(df_dif_close_plus_one_day['close'])-1] = np.nan
df_dif = df_dif.dropna(axis=0,how='any')
df_dif_close_plus_one_day = df_dif_close_plus_one_day.dropna(axis=0,how='any')
return df_dif, df_dif_close_plus_one_day
def get_normlized(self,df):
df_norm = df.copy()
if self.__norm_type == 'max':
for column in df_norm.columns:
df_norm[column] = df_norm[column] / abs(df_norm[column]).max()
elif self.__norm_type == 'character':
for column in df_norm.columns:
df_norm[column].ix[df_norm[column] <= 0] = -1
df_norm[column].ix[df_norm[column] > 0] = 1
else:
return None
return df_norm
def get_trade_chance(self):
#df,close,norm_type,start_date,end_date,lost
rate = {}
rate['based'] = {}
rate['based']['profit'] = {}
buy_price = {}
buy_date = {}
sell_price = {}
sell_date = {}
is_buy = {}
is_sell = {}
df_dif_norm = self.df_dif_norm = self.get_normlized(self.get_quota()[self.__quota_index])
df_dif_norm_corr = df_dif_norm.corr().ix['close']
self.close = self.__df.set_index('date')['close'].loc[df_dif_norm.index]
start_date_open = 0
end_date_open = 0
for idx in range(len(self.df_dif_norm)):
date_this = self.df_dif_norm.index[idx]
close_val = self.close[idx]
if date_this < self.__start_date:
continue
if date_this > self.__end_date:
end_date_open = close_val
break
sign = 0
for key_name in df_dif_norm.drop('close',axis=1).columns:
sign = sign + df_dif_norm.ix[date_this,key_name] * df_dif_norm_corr[key_name]
if start_date_open == 0:
start_date_open = close_val
x = idx
if idx>=1:
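# 'based' is the buy-and-hold benchmark: the running product of daily
# close-to-close returns up to date_this.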
lastdate = df_dif_norm.index[idx-1]
if lastdate not in rate['based']['profit']:
rate['based']['profit'][lastdate] = 1.0
rate['based']['profit'][date_this] = rate['based']['profit'][lastdate] * self.close[idx] / self.close[idx-1]
for m in np.array(range(-100,200,5))/20.0:
for n in np.array(range(-100,int(50*m+1),5))/20.0:
s_type = 'corr' + str(m) + '_' + str(n)
if s_type not in buy_price:
buy_price[s_type] = []
buy_date[s_type] = []
sell_price[s_type] = []
sell_date[s_type] = []
is_buy[s_type] = 0
#is_sell[s_type] = 0
if sign>=m:
if is_buy[s_type] == 0:
is_buy[s_type] = 1
buy_price[s_type].append(close_val)
buy_date[s_type].append(date_this)
#is_sell[s_type] = 0
continue
if sign<n or (len(buy_price[s_type]) and close_val * (1-0.002) / buy_price[s_type][-1] <= (1-self.__lost)):
if is_buy[s_type] == 1 : #and is_sell[s_type] == 0
is_buy[s_type] = 0
sell_price[s_type].append(close_val)
sell_date[s_type].append(date_this)
#is_sell[s_type] = 1
if not end_date_open:
end_date_open = close_val
if not start_date_open:
return []
rate['based']['profit']['total'] = end_date_open * (1 - 0.002) / start_date_open
return rate, date_this, buy_price, buy_date, sell_price, sell_date, start_date_open, end_date_open
def back_test(df,start_date='0',date_delta=60,norm_type='character',quota_index=0,lost=1.0):
if self.__start_date:
end_date = date_add(start_date,date_delta)
else:
end_date = '9'
if end_date > today():
return []
df_dif = get_quota(df)[quota_index]
df_dif_norm = get_normlized(df_dif)
df = pd.concat([df.set_index('date')['close'],df_dif_norm['sar']],axis=1).dropna(how='any')
close = df['close']
r = get_trade_chance(df_dif,close,norm_type,start_date,end_date,lost)
if r:
rate, date_this, buy_price, buy_date, sell_price, sell_date, start_date_open, end_date_open = r
else:
return []
for s_type in sell_price:
rate[s_type] = {}
rate[s_type]['profit'] = {}
rate[s_type]['profit']['total'] = 1.0
rate[s_type]['trade'] = {}
for i in range(len(buy_price[s_type])):
try:
#rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (sell_price[s_type][i] * (1 - 0.002) / buy_price[s_type][i])
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (sell_price[s_type][i] * (1 - 0.002) / buy_price[s_type][i]) * ((sell_price[s_type][i]) * (1 - 0.002) / buy_price[s_type][i+1])
rate[s_type]['profit'][buy_date[s_type][i]] = rate[s_type]['profit']['total']
rate[s_type]['trade'][buy_date[s_type][i]] = [buy_date[s_type][i], buy_price[s_type][i], sell_date[s_type][i], sell_price[s_type][i]]
except Exception,e:
if len(buy_price[s_type]) == len(sell_price[s_type]):
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (end_date_open * (1 - 0.002) / sell_price[s_type][i])
else:
rate[s_type]['profit']['total'] = rate[s_type]['profit']['total'] * (end_date_open * (1 - 0.002) / buy_price[s_type][i])
rate[s_type]['profit'][date_this] = rate[s_type]['profit']['total']
rate[s_type]['trade'][date_this] = [buy_date[s_type][i], buy_price[s_type][i], 'lastday', end_date_open]
return sorted(rate.items(),key=lambda x:x[1]['profit']['total'],reverse=True)
def plot_profit(rate,s_type=''):
for code in rate:
best_strategy_code = rate[code][0][0]
rate_dic = dict(rate[code])
based_profit = pd.DataFrame(rate_dic['based']).drop('total',axis=0)
if s_type:
best_strategy_profit = pd.DataFrame(rate_dic[s_type]).fillna(method='pad').drop('total',axis=0)
best_strategy_code = s_type
else:
if rate[code][0][0] == 'based':
best_strategy_profit = pd.DataFrame(rate_dic[rate[code][1][0]]).fillna(method='pad').drop('total',axis=0)
else:
best_strategy_profit = pd.DataFrame(rate_dic[rate[code][0][0]]).fillna(method='pad').drop('total',axis=0)
profit_all = pd.concat([based_profit['profit'], best_strategy_profit['profit']], axis=1).fillna(method='pad')
profit_all.plot()
plt.legend(('based_profit', 'best_strategy_profit'), loc='upper left')
plt.title(code + '_' + best_strategy_code)
plt.savefig('/Users/NealSu/Downloads/profit_pic/' + code + '_' + best_strategy_code + '.jpg')
plt.close('all')
try:
print code
print best_strategy_profit['trade']
except:
pass
def strategy_choose(rate):
strategy_sum = {}
best_strategy = {}
for code in rate:
rate_dic = dict(rate[code])
best_strategy_code = rate[code][0][0]
if best_strategy_code not in best_strategy:
best_strategy[best_strategy_code] = 1
else:
best_strategy[best_strategy_code] = best_strategy[best_strategy_code] + 1
for s_type in rate_dic:
if s_type not in strategy_sum:
strategy_sum[s_type] = rate_dic[s_type]['profit']['total']
else:
strategy_sum[s_type] = strategy_sum[s_type] + rate_dic[s_type]['profit']['total']
best_strategy = sorted(best_strategy.items(),key=lambda x:x[1],reverse=True)
strategy_sum = sorted(strategy_sum.items(),key=lambda x:x[1],reverse=True)
return (best_strategy,strategy_sum)
def single_test(df,start_dates,date_deltas,norm_type,quota_index):
rate = {}
for start_date in start_dates:
for date_delta in date_deltas:
r = back_test(df, start_date, date_delta, norm_type, quota_index)
if r:
rate[start_date+'_'+date_add(start_date,date_delta)] = r
return rate
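# --- Illustrative sketch (not part of the strategy class) ----------------------
# The two normalisations used by get_normlized above, shown on a tiny made-up
# DataFrame: 'max' rescales each column by its largest absolute value, while
# 'character' keeps only the sign (+1 / -1) of each daily change.
def _norm_demo():
    df = pd.DataFrame({'dif': [-2.0, 0.5, 3.0], 'bar': [1.0, -4.0, 2.0]})
    by_max = df / df.abs().max()
    by_sign = df.copy()
    by_sign[df <= 0] = -1
    by_sign[df > 0] = 1
    return by_max, by_sign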
|
gpl-2.0
|
jmatt/cloaked-sombrero
|
visuals/stacked_bar_node_by_flavor.py
|
1
|
2205
|
from collections import OrderedDict
from itertools import chain, repeat
import os
import numpy as np
import matplotlib.pyplot as plt
DATA_PATH = "~/dev/bio5/cloaked-sombrero/data/nodes_by_flavor.txt"
def get_data():
return open(os.path.expanduser(DATA_PATH)).readlines()[1:]
def by_node(data):
"""
Split lines from OpenStack mysql nova database by node, flavor
and the count of that flavor per node into a dictionary of
dictionaries.
"""
coll = {}
for line in data:
val, flavor, node = line.split()
if not node in coll.keys():
coll[node] = {}
coll[node][flavor] = int(val)
return coll
def by_flavor(nodes):
"""
Transform nodes collection to group by flavors for visualization.
"""
flavor_names = set(
chain(
*[fd.keys() for node, fd in nodes.iteritems()]))
coll = OrderedDict({f: [] for f in flavor_names})
node_names = nodes.keys()
node_names.sort()
for node_name in node_names:
for flavor_name in flavor_names:
if flavor_name in nodes[node_name].keys():
coll[flavor_name].append(nodes[node_name][flavor_name])
else:
coll[flavor_name].append(0)
return coll
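# --- Illustrative sketch --------------------------------------------------
# Hedged example of the two transforms above on made-up query output (each
# line is "<count> <flavor> <node>"); it runs under the same Python 2
# interpreter this module already targets.
def _demo_by_flavor():
    sample = ["3 m1.small node1",
              "1 m1.large node1",
              "2 m1.small node2"]
    # -> {'m1.small': [3, 2], 'm1.large': [1, 0]} with nodes sorted by name
    return by_flavor(by_node(sample))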
def build_visual(data):
nodes = by_node(data)
flavors = by_flavor(nodes)
flavor_size = len(flavors.keys())
node_names = sorted(nodes.keys())
node_size = len(nodes.keys())
btm = [0] * node_size
ind = np.arange(0, node_size * 1.5, 1.5)
width = 0.86
colors= ["r", "b", "g", "dimgray", "darkcyan", "lime",
"navy", "teal", "indigo", "y", "skyblue", "sage"]
color_labels = []
c = 0
plt.xticks(ind + width/2., node_names, rotation=70)
plt.title("Node resource usage by flavor")
for f,v in flavors.iteritems():
b = plt.bar(ind,
v,
width,
color=colors[c % len(colors)],
bottom=btm)
color_labels.append(b[0])
c += 1
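# accumulate the heights drawn so far, so the next flavor's bars start on
# top of the previous ones (this is what makes the chart stacked)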
btm = [f + b for f, b in zip(btm, v)]
plt.legend(color_labels, flavors.keys())
if __name__ == "__main__":
build_visual(get_data())
plt.show()
|
mit
|
MJuddBooth/pandas
|
pandas/tests/frame/test_indexing.py
|
1
|
132839
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import date, datetime, time, timedelta
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PY2, long, lrange, lzip, map, range, zip
from pandas.core.dtypes.common import is_float_dtype, is_integer, is_scalar
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,
Timestamp, compat, date_range, isna, notna)
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from pandas.tseries.offsets import BDay
class TestDataFrameIndexing(TestData):
def test_getitem(self):
# Slicing
sl = self.frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in compat.iteritems(sl):
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in compat.iteritems(self.frame._series):
assert self.frame[key] is not None
assert 'random' not in self.frame
with pytest.raises(KeyError, match='random'):
self.frame['random']
df = self.frame.copy()
df['$10'] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df['@awesome_domain'] = ad
with pytest.raises(KeyError):
df.__getitem__('df["$10"]')
res = df['@awesome_domain']
tm.assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
with pytest.raises(KeyError):
df[['baf']]
def test_get(self):
b = self.frame.get('B')
assert_series_equal(b, self.frame['B'])
assert self.frame.get('foo') is None
assert_series_equal(self.frame.get('foo', self.frame['B']),
self.frame['B'])
@pytest.mark.parametrize("df", [
DataFrame(),
DataFrame(columns=list("AB")),
DataFrame(columns=list("AB"), index=range(3))
])
def test_get_none(self, df):
# see gh-5652
assert df.get(None) is None
def test_loc_iterable(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"idx_type",
[list, iter, Index, set,
lambda l: dict(zip(l, range(len(l)))),
lambda l: dict(zip(l, range(len(l)))).keys()],
ids=["list", "iter", "Index", "set", "dict", "dict_keys"])
@pytest.mark.parametrize("levels", [1, 2])
def test_getitem_listlike(self, idx_type, levels):
# GH 21294
if levels == 1:
frame, missing = self.frame, 'food'
else:
# MultiIndex columns
frame = DataFrame(np.random.randn(8, 3),
columns=Index([('foo', 'bar'), ('baz', 'qux'),
('peek', 'aboo')],
name=('sth', 'sth2')))
missing = ('good', 'food')
keys = [frame.columns[1], frame.columns[0]]
idx = idx_type(keys)
idx_check = list(idx_type(keys))
result = frame[idx]
expected = frame.loc[:, idx_check]
expected.columns.names = frame.columns.names
assert_frame_equal(result, expected)
idx = idx_type(keys + [missing])
with pytest.raises(KeyError, match='not in index'):
frame[idx]
@pytest.mark.parametrize("val,expected", [
(2**63 - 1, Series([1])),
(2**63, Series([2])),
])
def test_loc_uint64(self, val, expected):
# see gh-19399
df = DataFrame([1, 2], index=[2**63 - 1, 2**63])
result = df.loc[val]
expected.name = val
tm.assert_series_equal(result, expected)
def test_getitem_callable(self):
# GH 12533
result = self.frame[lambda x: 'A']
tm.assert_series_equal(result, self.frame.loc[:, 'A'])
result = self.frame[lambda x: ['A', 'B']]
tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])
df = self.frame[:3]
result = df[lambda x: [True, False, True]]
tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
def test_setitem_list(self):
self.frame['E'] = 'foo'
data = self.frame[['A', 'B']]
self.frame[['B', 'A']] = data
assert_series_equal(self.frame['B'], data['A'], check_names=False)
assert_series_equal(self.frame['A'], data['B'], check_names=False)
msg = 'Columns must be same length as key'
with pytest.raises(ValueError, match=msg):
data[['A']] = self.frame[['A', 'B']]
msg = 'Length of values does not match length of index'
with pytest.raises(ValueError, match=msg):
data['A'] = range(len(data.index) - 1)
df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
df.loc[1, ['tt1', 'tt2']] = [1, 2]
result = df.loc[df.index[1], ['tt1', 'tt2']]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
assert_series_equal(result, expected)
df['tt1'] = df['tt2'] = '0'
df.loc[df.index[1], ['tt1', 'tt2']] = ['1', '2']
result = df.loc[df.index[1], ['tt1', 'tt2']]
expected = Series(['1', '2'], df.columns, name=1)
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index, name='tuples')
assert_series_equal(result, expected)
def test_setitem_mulit_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006', periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_setitem_callable(self):
# GH 12533
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]})
df[lambda x: 'A'] = [11, 12, 13, 14]
exp = pd.DataFrame({'A': [11, 12, 13, 14], 'B': [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH 13299
def inc(x):
return x + 1
df = pd.DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = pd.DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match='Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match='boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [self.tsframe, self.mixed_frame,
self.mixed_float, self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
continue
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame({c: np.where(data[c] > 0, data[c], np.nan)
for c in data.columns},
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32': 2, 'int64': 2})
assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ['E1', 'F1']] = 0
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data=np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isna(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=['A', 'B', 'C', 'D', 'E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
assert_frame_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
# 11320
df = pd.DataFrame({"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30]},
columns=['rna', -1000, 0, 1000])
result = df[[1000]]
expected = df.iloc[:, [3]]
assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
self.frame.ix[:, [-1]] = 0
assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
# ix does label-based indexing when having an integer index
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
with pytest.raises(KeyError):
df.ix[[-1]]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
with pytest.raises(KeyError):
df.ix[:, [-1]]
# #1942
a = DataFrame(np.random.randn(20, 2),
index=[chr(x + 65) for x in range(20)])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
a.ix[-1] = a.ix[-2]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
assert a.ix[-1].name == 'T'
assert a.ix[-2].name == 'S'
def test_getattr(self):
assert_series_equal(self.frame.A, self.frame['A'])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
self.frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
assert 'col5' in self.frame
assert len(series) == 15
assert len(self.frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=self.frame.index, name='col5')
tm.assert_series_equal(self.frame['col5'], exp)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_series_equal(series, self.frame['col6'], check_names=False)
with pytest.raises(KeyError):
self.frame[np.random.randn(len(self.frame) + 1)] = 1
# set ndarray
arr = np.random.randn(len(self.frame))
self.frame['col9'] = arr
assert (self.frame['col9'] == arr).all()
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
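# (whether the write would propagate back to self.frame depends on which
# blocks happen to be views, so pandas raises rather than guessing)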
smaller = self.frame[:2]
with pytest.raises(com.SettingWithCopyError):
smaller['col10'] = ['1', '2']
assert smaller['col10'].dtype == np.object_
assert (smaller['col10'] == ['1', '2']).all()
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype):
arr = np.random.randn(len(self.frame))
self.frame[dtype] = np.array(arr, dtype=dtype)
assert self.frame[dtype].dtype.name == dtype
def test_setitem_tuple(self):
self.frame['A', 'B'] = self.frame['A']
assert_series_equal(self.frame['A', 'B'], self.frame[
'A'], check_names=False)
def test_setitem_always_copy(self):
s = self.frame['A'].copy()
self.frame['E'] = s
self.frame['E'][5:10] = np.nan
assert notna(s[5:10]).all()
def test_setitem_boolean(self):
df = self.frame.copy()
values = self.frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
assert_almost_equal(df.values, values)
# test that column reindexing works
series = df['A'] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"mask_type",
[lambda df: df > np.abs(df) / 2,
lambda df: (df > np.abs(df) / 2).values],
ids=['dataframe', 'array'])
def test_setitem_boolean_mask(self, mask_type):
# Test for issue #18582
df = self.frame.copy()
mask = mask_type(df)
# index with boolean mask
result = df.copy()
result[mask] = np.nan
expected = df.copy()
expected.values[np.array(mask)] = np.nan
assert_frame_equal(result, expected)
def test_setitem_cast(self):
self.frame['D'] = self.frame['D'].astype('i8')
assert self.frame['D'].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
self.frame['B'] = 0
assert self.frame['B'].dtype == np.int64
# cast if pass array of course
self.frame['B'] = np.arange(len(self.frame))
assert issubclass(self.frame['B'].dtype.type, np.integer)
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
assert self.frame['foo'].dtype == np.int64
self.frame['foo'] = 'bar'
self.frame['foo'] = 2.5
assert self.frame['foo'].dtype == np.float64
self.frame['something'] = 0
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2.5
assert self.frame['something'].dtype == np.float64
# GH 7704
# dtype conversion on setting
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10, 'event'] = 'foo'
result = df.get_dtype_counts().sort_values()
expected = Series({'float64': 3, 'object': 1}).sort_values()
assert_series_equal(result, expected)
# Test that data type is preserved . #5782
df = DataFrame({'one': np.arange(6, dtype=np.int8)})
df.loc[1, 'one'] = 6
assert df.dtypes.one == np.dtype(np.int8)
df.one = np.int8(7)
assert df.dtypes.one == np.dtype(np.int8)
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
self.frame.loc[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
assert_frame_equal(self.frame, expected)
def test_frame_setitem_timestamp(self):
# GH#2155
columns = date_range(start='1/1/2012', end='2/1/2012', freq=BDay())
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
def test_setitem_corner(self):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
index=np.arange(3))
del df['B']
df['B'] = [1., 2., 3.]
assert 'B' in df
assert len(df.columns) == 2
df['A'] = 'beginning'
df['E'] = 'foo'
df['D'] = 'bar'
df[datetime.now()] = 'date'
df[datetime.now()] = 5.
# what to do when empty frame with index
dm = DataFrame(index=self.frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm['C'] = 1
assert dm['C'].dtype == np.int64
dm['E'] = 1.
assert dm['E'].dtype == np.float64
# set existing column
dm['A'] = 'bar'
assert 'bar' == dm['A'][0]
dm = DataFrame(index=np.arange(3))
dm['A'] = 1
dm['foo'] = 'bar'
del dm['foo']
dm['foo'] = 'bar'
assert dm['foo'].dtype == np.object_
dm['coercable'] = ['1', '2', '3']
assert dm['coercable'].dtype == np.object_
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.loc[ix, ['title']] = 'foobar'
df.loc[ix, ['cruft']] = 0
assert df.loc[1, 'title'] == 'foobar'
assert df.loc[1, 'cruft'] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=lrange(3), columns=lrange(3))
coercable_series = Series([Decimal(1) for _ in range(3)],
index=lrange(3))
uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_clear_caches(self):
# see gh-304
df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
index=[0, 1, 2, 3])
df.insert(2, 'z', np.nan)
# cache it
foo = df['z']
df.loc[df.index[2:], 'z'] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
assert df['z'] is not foo
tm.assert_series_equal(df['z'], expected)
def test_setitem_None(self):
# GH #766
self.frame[None] = self.frame['A']
assert_series_equal(
self.frame.iloc[:, -1], self.frame['A'], check_names=False)
assert_series_equal(self.frame.loc[:, None], self.frame[
'A'], check_names=False)
assert_series_equal(self.frame[None], self.frame[
'A'], check_names=False)
repr(self.frame)
def test_setitem_empty(self):
# GH 9596
df = pd.DataFrame({'a': ['1', '2', '3'],
'b': ['11', '22', '33'],
'c': ['111', '222', '333']})
result = df.copy()
result.loc[result.b.isna(), 'a'] = result.a
assert_frame_equal(result, df)
@pytest.mark.parametrize("dtype", ["float", "int64"])
@pytest.mark.parametrize("kwargs", [
dict(),
dict(index=[1]),
dict(columns=["A"])
])
def test_setitem_empty_frame_with_boolean(self, dtype, kwargs):
# see gh-10126
kwargs["dtype"] = dtype
df = DataFrame(**kwargs)
df2 = df.copy()
df[df > df2] = 47
assert_frame_equal(df, df2)
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
df = DataFrame()
df['foo'] = 1
expected = DataFrame(columns=['foo']).astype(np.int64)
assert_frame_equal(df, expected)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue #11859
df = pd.DataFrame()
df2 = df[df > 0]
assert_frame_equal(df, df2)
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
assert len(f.columns) == 3
with pytest.raises(KeyError, match=r"^'D'$"):
del f['D']
del f['B']
assert len(f.columns) == 2
def test_getitem_fancy_2d(self):
f = self.frame
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[5:10], f[5:10])
assert_frame_equal(f.ix[5:10, :], f[5:10])
assert_frame_equal(f.ix[:5, ['A', 'B']],
f.reindex(index=f.index[:5],
columns=['A', 'B']))
# slice rows with labels, inclusive!
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
expected = f.ix[5:11]
result = f.ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(f.ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
exp = f.copy()
f.ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
msg = "Cannot index with multidimensional key"
with pytest.raises(ValueError, match=msg):
f.ix[f > 0.5]
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.loc[52195.1:52196.5]
assert len(s1) == 2
s1 = df.loc[52195.1:52196.6]
assert len(s1) == 2
s1 = df.loc[52195.1:52198.9]
assert len(s1) == 3
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
def test_setitem_fancy_2d(self):
# case 1
frame = self.frame.copy()
expected = frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
assert_frame_equal(frame, expected)
# case 2
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = frame.copy()
subidx = self.frame.index[[5, 4, 1]]
values = np.random.randn(3, 2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
expected['B'].ix[subidx] = values[:, 0]
expected['A'].ix[subidx] = values[:, 1]
assert_frame_equal(frame, expected)
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
frame = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
expected2 = self.frame.copy()
arr = np.random.randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
assert_frame_equal(frame, expected2)
# case 4
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
assert_frame_equal(frame, expected2)
# case 5
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
values = np.random.randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
expected['A'][:5] = values[:, 0]
expected['B'][:5] = values[:, 1]
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
assert_frame_equal(frame, expected)
# case 7: slice columns
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
expected.values[:, 1:3] = 4.
assert_frame_equal(frame, expected)
# slice with labels
frame.ix[:, 'B':'C'] = 4.
assert_frame_equal(frame, expected)
# new corner case of boolean slicing / setting
frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
columns=['a', 'b'])
lst = [100]
lst.extend([np.nan] * 4)
expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
columns=['a', 'b'])
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
sliced = self.mixed_frame.iloc[:, -3:]
assert sliced['D'].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = self.frame.iloc[:, -3:]
with pytest.raises(com.SettingWithCopyError):
sliced['C'] = 4.
assert (self.frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[[0, 2, 4]] = 5
exp.values[:3] = 5
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[6] = 5
exp.values[3] = 5
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
tmp = df.copy()
exp = df.copy()
tmp.ix[:, 2] = 5
# tmp correctly sets the dtype
# so match the exp way
exp[2] = 5
assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[4, 2, 0], [2, 0]]
expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[4, 2, 0]]
expected = df.reindex(index=[4, 2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[4]
expected = df.xs(4)
assert_series_equal(result, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[:, 3]
expected = df[3]
assert_series_equal(result, expected)
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
# labels that aren't contained
with pytest.raises(KeyError, match=r"\[1\] not in index"):
df.ix[[0, 1, 2], [2, 3, 4]] = 5
# try to set indices not contained in frame
msg = (r"None of \[Index\(\['foo', 'bar', 'baz'\],"
r" dtype='object'\)\] are in the \[index\]")
with pytest.raises(KeyError, match=msg):
self.frame.ix[['foo', 'bar', 'baz']] = 1
msg = (r"None of \[Index\(\['E'\], dtype='object'\)\] are in the"
r" \[columns\]")
with pytest.raises(KeyError, match=msg):
self.frame.ix[:, ['E']] = 1
            # partial setting now allows this (GH 2578)
# pytest.raises(KeyError, self.frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
assert (result.values == 5).all()
self.mixed_frame.ix[5] = np.nan
assert isna(self.mixed_frame.ix[5]).all()
self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
check_names=False)
# #1432
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
assert df._is_mixed_type
df.ix[1] = [5, 10]
expected = DataFrame({1: [1., 5., 3.],
2: [3, 10, 5]})
assert_frame_equal(df, expected)
def test_ix_align(self):
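        # assignment of a Series via .ix aligns on the Series index,
        # whether setting a full column/row or only part of one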
b = Series(np.random.randn(10), name=0).sort_values()
df_orig = DataFrame(np.random.randn(10, 4))
df = df_orig.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df.ix[:, 0] = b
assert_series_equal(df.ix[:, 0].reindex(b.index), b)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
dft = df_orig.T
dft.ix[0, :] = b
assert_series_equal(dft.ix[0, :].reindex(b.index), b)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
df.ix[:5, 0] = b
s = df.ix[:5, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
dft = df_orig.T
dft.ix[0, :5] = b
s = dft.ix[0, :5]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
idx = [0, 1, 3, 5]
df.ix[idx, 0] = b
s = df.ix[idx, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
dft = df_orig.T
dft.ix[0, idx] = b
s = dft.ix[0, idx]
assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
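        # assignment of a DataFrame via .ix aligns on the frame's index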
b = DataFrame(np.random.randn(3, 4))
df_orig = DataFrame(np.random.randn(10, 4))
df = df_orig.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df.ix[:3] = b
out = b.ix[:3]
assert_frame_equal(out, b)
b.sort_index(inplace=True)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
df.ix[[0, 1, 2]] = b
out = df.ix[[0, 1, 2]].reindex(b.index)
assert_frame_equal(out, b)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
df = df_orig.copy()
df.ix[:3] = b
out = df.ix[:3]
assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
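        # label slicing via .loc and plain [] on a time index is inclusive
        # of the end label, for both getting and setting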
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
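        # boolean-mask row selection via .loc is equivalent to reindexing
        # on the matching labels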
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
""" #1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
assert_frame_equal(rs, xp)
"""
def test_ix_multi_take_nonint_index(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=['a', 'b'])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=['a'])
assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=[['a', 'b'], ['1', '2']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=[('a', '1')])
assert_frame_equal(rs, xp)
def test_ix_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
df = DataFrame(np.random.randn(len(idx), 3), idx)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
sub = df.ix[:'d']
assert_frame_equal(sub, df)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
sub = df.ix['a':'c']
assert_frame_equal(sub, df.ix[0:4])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
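        # low-dimensional .ix indexing: label/positional cross-sections,
        # column slices and views back into the original frame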
f = self.frame
# return self if no slicing...for now
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert f.ix[:, :] is f
# low dimensional slice
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs1 = f.ix[2, ['C', 'B', 'A']]
xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
ts1 = f.ix[5:10, 2]
ts2 = f[f.columns[2]][5:10]
tm.assert_series_equal(ts1, ts2)
# positional xs
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs1 = f.ix[0]
xs2 = f.xs(f.index[0])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs1 = f.ix[f.index[5]]
xs2 = f.xs(f.index[5])
tm.assert_series_equal(xs1, xs2)
# single column
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_series_equal(f.ix[:, 'A'], f['A'])
# return view
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
exp = f.copy()
exp.values[5] = 4
f.ix[5][:] = 4
tm.assert_frame_equal(exp, f)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
exp.values[:, 1] = 6
f.ix[:, 1][:] = 6
tm.assert_frame_equal(exp, f)
# slice of mixed-frame
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
xs = self.mixed_frame.ix[5]
exp = self.mixed_frame.xs(self.mixed_frame.index[5])
tm.assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
# case 1: set cross-section for indices
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
expected['C'][2] = 1.
expected['B'][2] = 2.
expected['A'][2] = 3.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame2 = self.frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
vals = np.random.randn(5)
expected.values[5:10, 2] = vals
frame.ix[5:10, 2] = vals
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame2 = self.frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[4] = 5.
expected.values[4] = 5.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[frame.index[4]] = 6.
expected.values[4] = 6.
assert_frame_equal(frame, expected)
# single column
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
frame.ix[:, 'A'] = 7.
expected['A'] = 7.
assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
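        # scalar lookup via .loc matches column-then-row __getitem__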
f = self.frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
def test_setitem_fancy_scalar(self):
f = self.frame
expected = self.frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
f = self.frame
ix = f.loc
expected = f.reindex(columns=['B', 'D'])
result = ix[:, [False, True, False, True]]
assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
result = ix[f.index[5:10], [False, True, False, True]]
assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
assert_frame_equal(result, expected)
result = ix[boolvec, :]
assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec],
columns=['C', 'D'])
assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
frame = self.frame.copy()
expected = self.frame.copy()
mask = frame['A'] > 0
frame.loc[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
frame = self.frame.copy()
expected = self.frame.copy()
frame.loc[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
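        # positional selection via .iloc matches label-based .loc on the
        # corresponding labels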
result = self.frame.iloc[[1, 4, 7]]
expected = self.frame.loc[self.frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
result = self.frame.iloc[:, [2, 0, 1]]
expected = self.frame.loc[:, self.frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
ix = self.frame.iloc
with pytest.raises(IndexingError, match='Too many indexers'):
ix[:, :, :]
with pytest.raises(IndexingError):
ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
# boolean index misaligned labels
mask = self.frame['A'][::-1] > 1
result = self.frame.loc[mask]
expected = self.frame.loc[mask[::-1]]
assert_frame_equal(result, expected)
cp = self.frame.copy()
expected = self.frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]),
np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = ("cannot do slice indexing on"
r" <class 'pandas\.core\.indexes\.numeric\.Float64Index'> with"
r" these indexers \[1.0\] of <class 'float'>")
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError):
result = cp.iloc[1.0:5] == 0 # noqa
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed(self):
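        # partial assignment of NaN into a freshly added object column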
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['str'] = 'qux'
df.loc[df.index[::2], 'str'] = np.nan
expected = np.array([np.nan, 'qux', np.nan, 'qux', np.nan],
dtype=object)
assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['timestamp'] = Timestamp('20010102')
# check our dtypes
result = df.get_dtype_counts()
expected = Series({'float64': 3, 'datetime64[ns]': 1})
assert_series_equal(result, expected)
# set an allowable datetime64 type
df.loc['b', 'timestamp'] = iNaT
assert isna(df.loc['b', 'timestamp'])
# allow this syntax
df.loc['c', 'timestamp'] = np.nan
assert isna(df.loc['c', 'timestamp'])
# allow this syntax
df.loc['d', :] = np.nan
assert not isna(df.loc['c', :]).all()
# as of GH 3216 this will now work!
        # try to set with a list-like item
# pytest.raises(
# Exception, df.loc.__setitem__, ('d', 'timestamp'), [np.nan])
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame({'a': [0, 0, 0, 0, 13, 14],
'b': [pd.datetime(2012, 1, 1),
1,
'x',
'y',
pd.datetime(2013, 1, 1),
pd.datetime(2014, 1, 1)]})
df = pd.DataFrame(0, columns=list('ab'), index=range(6))
df['b'] = pd.NaT
df.loc[0, 'b'] = pd.datetime(2012, 1, 1)
df.loc[1, 'b'] = 1
df.loc[[2, 3], 'b'] = 'x', 'y'
A = np.array([[13, np.datetime64('2013-01-01T00:00:00')],
[14, np.datetime64('2014-01-01T00:00:00')]])
df.loc[[4, 5], ['a', 'b']] = A
assert_frame_equal(df, expected)
def test_setitem_frame(self):
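        # setting a block via .loc with ndarray values and with DataFrames
        # that may be unaligned on rows or keys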
piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
# GH 3216
# already aligned
f = self.mixed_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.]],
index=f.index[0:2], columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
f.loc[key] = piece
assert_almost_equal(f.loc[f.index[0:2], ['A', 'B']].values,
piece.values)
# rows unaligned
f = self.mixed_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
index=list(f.index[0:2]) + ['foo', 'bar'],
columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
f.loc[key] = piece
assert_almost_equal(f.loc[f.index[0:2:], ['A', 'B']].values,
piece.values[0:2])
# key is unaligned with values
f = self.mixed_frame.copy()
piece = f.loc[f.index[:2], ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece
piece['B'] = np.nan
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
# ndarray
f = self.mixed_frame.copy()
piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece.values
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
# needs upcasting
df = DataFrame([[1, 2, 'foo'], [3, 4, 'bar']], columns=['A', 'B', 'C'])
df2 = df.copy()
df2.loc[:, ['A', 'B']] = df.loc[:, ['A', 'B']] + 0.5
expected = df.reindex(columns=['A', 'B'])
expected += 0.5
expected['C'] = df['C']
assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
piece.index = self.frame.index[-2:]
piece.columns = ['A', 'B']
self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc['foo']
expected = df[:2]
assert_frame_equal(result, expected)
result = df.loc['bar']
expected = df.iloc[[2, 4]]
assert_frame_equal(result, expected)
result = df.loc['baz']
expected = df.iloc[3]
assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc[['bar']]
exp = df.iloc[[2, 4]]
assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
# #2199
df = DataFrame({'a': [1, 2, 3]})
with pytest.raises(KeyError, match=r"^False$"):
df.loc[False]
with pytest.raises(KeyError, match=r"^True$"):
df.loc[True]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[False] = 0
with pytest.raises(KeyError, match=msg):
df.loc[True] = 0
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
assert result.columns.name == 'foo'
expected = df.iloc[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
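        # the deprecated get_value warns (FutureWarning) but still matches
        # plain column/row indexing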
for idx in self.frame.index:
for col in self.frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert result == expected
def test_lookup(self):
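        # DataFrame.lookup pulls the values at the given (row, column) label
        # pairs; missing labels and mismatched lengths raise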
def alt(df, rows, cols, dtype):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = [df.get_value(r, c) for r, c in zip(rows, cols)]
return np.array(result, dtype=dtype)
def testit(df):
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
result = df.lookup(rows, cols)
expected = alt(df, rows, cols, dtype=np.object_)
tm.assert_almost_equal(result, expected, check_dtype=False)
testit(self.mixed_frame)
testit(self.frame)
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
'mask_b': [True, False, False, False],
'mask_c': [False, True, False, True]})
df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
exp_mask = alt(df, df.index, 'mask_' + df['label'], dtype=np.bool_)
tm.assert_series_equal(df['mask'], pd.Series(exp_mask, name='mask'))
assert df['mask'].dtype == np.bool_
with pytest.raises(KeyError):
self.frame.lookup(['xyz'], ['A'])
with pytest.raises(KeyError):
self.frame.lookup([self.frame.index[0]], ['xyz'])
with pytest.raises(ValueError, match='same size'):
self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
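        # the deprecated set_value warns (FutureWarning) but still sets the
        # value in place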
for idx in self.frame.index:
for col in self.frame.columns:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
self.frame.set_value(idx, col, 1)
assert self.frame[col][idx] == 1
@pytest.mark.skipif(PY2, reason="pytest.raises match regex fails")
def test_set_value_resize(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res = self.frame.set_value('foobar', 'B', 0)
assert res is self.frame
assert res.index[-1] == 'foobar'
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert res.get_value('foobar', 'B') == 0
self.frame.loc['foobar', 'qux'] = 0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert self.frame.get_value('foobar', 'qux') == 0
res = self.frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 'sam')
assert res3['baz'].dtype == np.object_
res = self.frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', True)
assert res3['baz'].dtype == np.object_
res = self.frame.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
res3 = res.set_value('foobar', 'baz', 5)
assert is_float_dtype(res3['baz'])
assert isna(res3['baz'].drop(['foobar'])).all()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
msg = "could not convert string to float: 'sam'"
with pytest.raises(ValueError, match=msg):
res3.set_value('foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(np.random.randn(3, 3),
index=lrange(3), columns=list('ABC'))
        # this is actually ambiguous as the 2 is interpreted as a positional
        # indexer, so the column is not created
df = df_orig.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.set_value('C', 2, 1.0)
assert list(df.index) == list(df_orig.index) + ['C']
# assert list(df.columns) == list(df_orig.columns) + [2]
df = df_orig.copy()
df.loc['C', 2] = 1.0
assert list(df.index) == list(df_orig.index) + ['C']
# assert list(df.columns) == list(df_orig.columns) + [2]
# create both new
df = df_orig.copy()
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.set_value('C', 'D', 1.0)
assert list(df.index) == list(df_orig.index) + ['C']
assert list(df.columns) == list(df_orig.columns) + ['D']
df = df_orig.copy()
df.loc['C', 'D'] = 1.0
assert list(df.index) == list(df_orig.index) + ['C']
assert list(df.columns) == list(df_orig.columns) + ['D']
def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=lrange(4))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
with pytest.raises(KeyError, match=r"^0$"):
df.get_value(0, 1)
def test_single_element_ix_dont_upcast(self):
self.frame['E'] = 1
assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = self.frame.ix[self.frame.index[5], 'E']
assert is_integer(result)
result = self.frame.loc[self.frame.index[5], 'E']
assert is_integer(result)
# GH 11617
df = pd.DataFrame(dict(a=[1.23]))
df["b"] = 666
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[0, "b"]
assert is_integer(result)
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name='b')
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[[0], "b"]
assert_series_equal(result, expected)
result = df.loc[[0], "b"]
assert_series_equal(result, expected)
def test_iloc_row(self):
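        # positional row selection via .iloc: single rows, slices (which
        # are views) and lists of integers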
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
assert_frame_equal(result, expected)
# verify slice is view
# setting it makes it raise/warn
with pytest.raises(com.SettingWithCopyError):
result[2] = 0.
exp_col = df[2].copy()
exp_col[4:8] = 0.
assert_series_equal(df[2], exp_col)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_iloc_col(self):
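        # positional column selection via .iloc: single columns, slices
        # (which are views) and lists of integers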
df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
assert_frame_equal(result, expected)
# verify slice is view
# and that we are setting a copy
with pytest.raises(com.SettingWithCopyError):
result[8] = 0.
assert (df[8] == 0).all()
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_iloc_duplicates(self):
df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
index=list('aab'))
result = df.iloc[0]
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result2 = df.ix[0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.T.iloc[:, 0]
result2 = df.T.ix[:, 0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
# multiindex
df = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.iloc[0]
xp = df.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.iloc[:, 0]
xp = df.T.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
rs = df.iloc[:, [0]]
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
# #2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.iloc[:, [0]]
expected = df.take([0], axis=1)
assert_frame_equal(result, expected)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = pd.date_range(start=pd.Timestamp(year=2017, month=1, day=1),
end=pd.Timestamp(year=2017, month=1, day=5))
trange = trange.insert(loc=5,
item=pd.Timestamp(year=2017, month=1, day=5))
df = pd.DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = pd.DataFrame({'A': [0, 0, 0, 0, 6, 6],
'B': [0, 0, 0, 0, 0, 0]},
index=trange)
tm.assert_frame_equal(df, expected)
# in-place
df = pd.DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
    def test_iloc_sparse_propagate_fill_value(self):
from pandas.core.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
def test_iat(self):
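        # positional .iat access matches label-based .at access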
for i, row in enumerate(self.frame.index):
for j, col in enumerate(self.frame.columns):
result = self.frame.iat[i, j]
expected = self.frame.at[row, col]
assert result == expected
def test_nested_exception(self):
        # Ignore the strange way of triggering the problem
        # (which may get fixed); it's just a way to trigger
        # the issue of reraising an outer exception without
        # a named argument
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
index = list(df.index)
index[0] = ["a", "b"]
df.index = index
try:
repr(df)
except Exception as e:
assert type(e) != UnboundLocalError
@pytest.mark.parametrize("method,expected_values", [
("nearest", [0, 1, 1, 2]),
("pad", [np.nan, 0, 1, 1]),
("backfill", [0, 1, 2, 2])
])
def test_reindex_methods(self, method, expected_values):
df = pd.DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = pd.DataFrame({'x': expected_values}, index=target)
actual = df.reindex(target, method=method)
assert_frame_equal(expected, actual)
actual = df.reindex_like(df, method=method, tolerance=0)
assert_frame_equal(df, actual)
actual = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
assert_frame_equal(df, actual)
actual = df.reindex(target, method=method, tolerance=1)
assert_frame_equal(expected, actual)
actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
assert_frame_equal(expected, actual)
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
assert_frame_equal(e2, actual)
new_order = [3, 0, 2, 1]
e2 = expected.iloc[new_order]
actual = df.reindex(target[new_order], method=method)
assert_frame_equal(e2, actual)
switched_method = ('pad' if method == 'backfill'
else 'backfill' if method == 'pad'
else method)
actual = df[::-1].reindex(target, method=switched_method)
assert_frame_equal(expected, actual)
def test_reindex_methods_nearest_special(self):
df = pd.DataFrame({"x": list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
expected = pd.DataFrame({"x": [0, 1, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest", tolerance=0.2)
assert_frame_equal(expected, actual)
expected = pd.DataFrame({"x": [0, np.nan, 1, np.nan]}, index=target)
actual = df.reindex(target, method="nearest",
tolerance=[0.5, 0.01, 0.4, 0.1])
assert_frame_equal(expected, actual)
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
assert np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))
mask = com.isna(result)['B']
assert mask[-5:].all()
assert not mask[:-5].any()
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
assert x[0].dtype == np.dtype('M8[ns]')
def test_non_monotonic_reindex_methods(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6, 1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
columns=list('A'))
# index is not monotonic increasing or decreasing
msg = "index must be monotonic increasing or decreasing"
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method='pad')
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method='ffill')
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method='bfill')
with pytest.raises(ValueError, match=msg):
df_rev.reindex(df.index, method='nearest')
def test_reindex_level(self):
from itertools import permutations
icol = ['jim', 'joe', 'jolie']
def verify_first_level(df, level, idx, check_index_type=True):
def f(val):
return np.nonzero((df[level] == val).to_numpy())[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[i].set_index(icol)
assert_frame_equal(left, right, check_index_type=check_index_type)
def verify(df, level, idx, indexer, check_index_type=True):
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[indexer].set_index(icol)
assert_frame_equal(left, right, check_index_type=check_index_type)
df = pd.DataFrame({'jim': list('B' * 4 + 'A' * 2 + 'C' * 3),
'joe': list('abcdeabcd')[::-1],
'jolie': [10, 20, 30] * 3,
'joline': np.random.randint(0, 1000, 9)})
target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
['A', 'B'], ['B', 'A', 'C']]
for idx in target:
verify_first_level(df, 'jim', idx)
        # reindexing by these causes different MultiIndex levels
for idx in [['D', 'F'], ['A', 'C', 'B']]:
verify_first_level(df, 'jim', idx, check_index_type=False)
verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
verify(df, 'joe', list('edwq'), [0, 4, 5])
verify(df, 'joe', list('wq'), [], check_index_type=False)
df = DataFrame({'jim': ['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
'joe': ['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
['3rd'] * 3 + ['2nd'] * 2,
                        # this needs to be jointly unique with jim and joe or
                        # reindexing will fail ~1.5% of the time; this works
                        # out to needing unique groups of the same size as joe
'jolie': np.concatenate([
np.random.choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
'joline': np.random.randn(20).round(3) * 10})
for idx in permutations(df['jim'].unique()):
for i in range(3):
verify_first_level(df, 'jim', idx[:i + 1])
i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10,
11, 12, 13, 14, 18, 19, 15, 16, 17]
verify(df, 'joe', ['1st', '2nd', '3rd'], i)
i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6,
7, 8, 9, 15, 16, 17, 18, 19, 13, 14]
verify(df, 'joe', ['3rd', '2nd', '1st'], i)
i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]
verify(df, 'joe', ['2nd', '3rd'], i)
i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]
verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
df = pd.DataFrame(np.random.randn(3, 3),
index=[0.1, 0.2, 0.2], columns=list('abc'))
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df = pd.DataFrame(np.random.randn(4, 3),
index=[1, 0.2, 0.2, 1], columns=list('abc'))
expect = df.iloc[1:-1]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:-1, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[[1, -1], 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
def test_getitem_sparse_column(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = pd.SparseArray([0, 1])
df = pd.DataFrame({"A": data})
expected = pd.Series(data, name="A")
result = df['A']
tm.assert_series_equal(result, expected)
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, 'A']
tm.assert_series_equal(result, expected)
def test_setitem_with_sparse_value(self):
# GH8131
df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_array = pd.SparseArray([0, 0, 1])
df['new_column'] = sp_array
assert_series_equal(df['new_column'],
pd.Series(sp_array, name='new_column'),
check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = pd.Series(pd.SparseArray([0, 0, 1]), index=[2, 1, 0])
df['new_column'] = sp_series
exp = pd.Series(pd.SparseArray([1, 0, 0]), name='new_column')
assert_series_equal(df['new_column'], exp)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = pd.Series(pd.date_range('2015-01-01', periods=3, tz='utc'),
name='dates')
df = pd.DataFrame({'dates': column})
df['dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
df = pd.DataFrame({'dates': column})
df.loc[[0, 1, 2], 'dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
def test_setitem_datetime_coercion(self):
# gh-1048
df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})
df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
assert pd.Timestamp('2008-08-08') == df.loc[0, 'c']
assert pd.Timestamp('2008-08-08') == df.loc[1, 'c']
df.loc[2, 'c'] = date(2005, 5, 5)
assert pd.Timestamp('2005-05-05') == df.loc[2, 'c']
def test_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range('20130101', periods=4))
df['A'] = np.array([1 * one_hour] * 4, dtype='m8[ns]')
df.loc[:, 'B'] = np.array([2 * one_hour] * 4, dtype='m8[ns]')
df.loc[:3, 'C'] = np.array([3 * one_hour] * 3, dtype='m8[ns]')
df.loc[:, 'D'] = np.array([4 * one_hour] * 4, dtype='m8[ns]')
df.loc[df.index[:3], 'E'] = np.array([5 * one_hour] * 3,
dtype='m8[ns]')
df['F'] = np.timedelta64('NaT')
df.loc[df.index[:-1], 'F'] = np.array([6 * one_hour] * 3,
dtype='m8[ns]')
df.loc[df.index[-3]:, 'G'] = date_range('20130101', periods=3)
df['H'] = np.datetime64('NaT')
result = df.dtypes
expected = Series([np.dtype('timedelta64[ns]')] * 6 +
[np.dtype('datetime64[ns]')] * 2,
index=list('ABCDEFGH'))
assert_series_equal(result, expected)
@pytest.mark.parametrize('idxer', ['var', ['var']])
def test_setitem_datetimeindex_tz(self, idxer, tz_naive_fixture):
# GH 11365
tz = tz_naive_fixture
idx = date_range(start='2015-07-12', periods=3, freq='H', tz=tz)
expected = DataFrame(1.2, index=idx, columns=['var'])
result = DataFrame(index=idx, columns=['var'])
result.loc[:, idxer] = expected
tm.assert_frame_equal(result, expected)
def test_at_time_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq='30min')
df = DataFrame(np.random.randn(len(index), 5), index=index)
akey = time(12, 0, 0)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
ainds = [24, 72, 120, 168]
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.at_time(akey)
expected = df.loc[akey]
expected2 = df.iloc[ainds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
assert len(result) == 4
result = df.between_time(bkey.start, bkey.stop)
expected = df.loc[bkey]
expected2 = df.iloc[binds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
assert len(result) == 12
result = df.copy()
result.loc[akey] = 0
result = result.loc[akey]
expected = df.loc[akey].copy()
expected.loc[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.loc[akey] = 0
result.loc[akey] = df.iloc[ainds]
assert_frame_equal(result, df)
result = df.copy()
result.loc[bkey] = 0
result = result.loc[bkey]
expected = df.loc[bkey].copy()
expected.loc[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.loc[bkey] = 0
result.loc[bkey] = df.iloc[binds]
assert_frame_equal(result, df)
def test_xs(self):
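        # cross-sections by label: plain rows, mixed-dtype rows, missing
        # keys and columns (axis=1), which return a view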
idx = self.frame.index[5]
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
assert np.isnan(self.frame[item][idx])
else:
assert value == self.frame[item][idx]
# mixed-type xs
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data)
xs = frame.xs('1')
assert xs.dtype == np.object_
assert xs['A'] == 1
assert xs['B'] == '1'
with pytest.raises(KeyError):
self.tsframe.xs(self.tsframe.index[0] - BDay())
# xs get column
series = self.frame.xs('A', axis=1)
expected = self.frame['A']
assert_series_equal(series, expected)
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
assert (expected == 5).all()
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
exp = pd.Series([1., 'foo', 2., 'bar', 3.],
index=list('ABCDE'), name=0)
tm.assert_series_equal(xs, exp)
# no columns but Index(dtype=object)
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([], name='a', index=pd.Index([], dtype=object))
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(np.random.randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
df = (DataFrame({'day': {0: 'sat', 1: 'sun'},
'flavour': {0: 'strawberry', 1: 'strawberry'},
'sales': {0: 10, 1: 12},
'year': {0: 2008, 1: 2008}})
.set_index(['year', 'flavour', 'day']))
result = df.xs('sat', level='day', drop_level=False)
expected = df[:1]
assert_frame_equal(result, expected)
result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
assert_frame_equal(result, expected)
def test_xs_view(self):
        # in 0.14 this will return a view if possible, a copy otherwise,
        # but this is numpy-dependent
dm = DataFrame(np.arange(20.).reshape(4, 5),
index=lrange(4), columns=lrange(5))
dm.xs(2)[:] = 10
assert (dm.xs(2) == 10).all()
def test_index_namedtuple(self):
from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
index = Index([idx1, idx2],
name="composite_index", tupleize_cols=False)
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
with catch_warnings(record=True):
simplefilter("ignore", DeprecationWarning)
result = df.ix[IndexType("foo", "bar")]["A"]
assert result == 1
result = df.loc[IndexType("foo", "bar")]["A"]
assert result == 1
def test_boolean_indexing(self):
idx = lrange(3)
cols = ['A', 'B', 'C']
df1 = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, 2.5],
[3.0, 3.5, 4.0]],
dtype=float))
df2 = DataFrame(index=idx, columns=cols,
data=np.ones((len(idx), len(cols))))
expected = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, -1],
[-1, -1, -1]], dtype=float))
df1[df1 > 2.0 * df2] = -1
assert_frame_equal(df1, expected)
with pytest.raises(ValueError, match='Item wrong length'):
df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
df = DataFrame({
long(0): {35: np.nan, 40: np.nan, 43: np.nan,
49: np.nan, 50: np.nan},
long(1): {35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139},
long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987,
49: np.nan, 50: np.nan},
long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan,
50: np.nan},
long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan,
49: np.nan, 50: np.nan},
'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
assert_frame_equal(df2, expected)
df['foo'] = 'test'
msg = ("boolean setting on mixed-type|"
"not supported between|"
"unorderable types")
with pytest.raises(TypeError, match=msg):
# TODO: This message should be the same in PY2/PY3
df[df > 0.3] = 1
def test_where(self):
default_frame = DataFrame(np.random.randn(5, 3),
columns=['A', 'B', 'C'])
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return (issubclass(s.dtype.type, (np.integer, np.floating)) and
s.dtype != 'uint8')
return DataFrame(dict((c, s + 1) if is_ok(s) else (c, s)
for c, s in compat.iteritems(df)))
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.iteritems():
exp = Series(
np.where(cond[k], df[k], other1[k]), index=v.index)
assert_series_equal(v, exp, check_names=False)
assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
assert (rs.dtypes == df.dtypes).all()
# check getting
for df in [default_frame, self.mixed_frame,
self.mixed_float, self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
with pytest.raises(TypeError):
df > 0
continue
cond = df > 0
_check_get(df, cond)
        # upcasting case (GH #2794)
df = DataFrame({c: Series([1] * 3, dtype=c)
for c in ['float32', 'float64',
'int32', 'int64']})
df.iloc[1, :] = 0
result = df.where(df >= 0).get_dtype_counts()
        # when we don't preserve boolean casts the result would be:
        # expected = Series({'float32': 1, 'float64': 3})
expected = Series({'float32': 1, 'float64': 1, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if is_scalar(other):
o = other
else:
if isinstance(other, np.ndarray):
o = Series(other[:, i], index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values, index=result.index, name=k)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
with pytest.raises(TypeError):
df > 0
continue
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all(not issubclass(s.type, np.integer)
for s in df.dtypes)
_check_align(df, cond, np.nan, check_dtypes=check_dtypes)
# invalid conditions
df = default_frame
err1 = (df + 1).values[0:2, :]
msg = "other must be the same shape as self when an ndarray"
with pytest.raises(ValueError, match=msg):
df.where(cond, err1)
err2 = cond.iloc[:2, :].values
other1 = _safe_add(df)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
df.where(err2, other1)
with pytest.raises(ValueError, match=msg):
df.mask(True)
with pytest.raises(ValueError, match=msg):
df.mask(0)
# where inplace
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
dfi.where(cond, np.nan, inplace=True)
assert_frame_equal(dfi, expected)
            # dtypes (and confirm upcasts)
if check_dtypes:
for k, v in compat.iteritems(df.dtypes):
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype('float64')
assert dfi[k].dtype == v
for df in [default_frame, self.mixed_frame, self.mixed_float,
self.mixed_int]:
if compat.PY3 and df is self.mixed_frame:
with pytest.raises(TypeError):
df > 0
continue
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
            # aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({'a': range(3), 'b': range(4, 7)})
result = df.where(df['a'] == 1)
expected = df[df['a'] == 1].reindex(df.index)
assert_frame_equal(result, expected)
@pytest.mark.parametrize("klass", [list, tuple, np.array])
def test_where_array_like(self, klass):
# see gh-15414
df = DataFrame({"a": [1, 2, 3]})
cond = [[False], [True], [True]]
expected = DataFrame({"a": [np.nan, 2, 3]})
result = df.where(klass(cond))
assert_frame_equal(result, expected)
df["b"] = 2
expected["b"] = [2, np.nan, 2]
cond = [[False, True], [True, False], [True, True]]
result = df.where(klass(cond))
assert_frame_equal(result, expected)
@pytest.mark.parametrize("cond", [
[[1], [0], [1]],
Series([[2], [5], [7]]),
DataFrame({"a": [2, 5, 7]}),
[["True"], ["False"], ["True"]],
[[Timestamp("2017-01-01")],
[pd.NaT], [Timestamp("2017-01-02")]]
])
def test_where_invalid_input_single(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
@pytest.mark.parametrize("cond", [
[[0, 1], [1, 0], [1, 1]],
Series([[0, 2], [5, 0], [4, 7]]),
[["False", "True"], ["True", "False"],
["True", "True"]],
DataFrame({"a": [2, 5, 7], "b": [4, 8, 9]}),
[[pd.NaT, Timestamp("2017-01-01")],
[Timestamp("2017-01-02"), pd.NaT],
[Timestamp("2017-01-03"), Timestamp("2017-01-03")]]
])
def test_where_invalid_input_multiple(self, cond):
# see gh-15414: only boolean arrays accepted
df = DataFrame({"a": [1, 2, 3], "b": [2, 2, 2]})
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
result = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(result, expected)
# this *does* align, though has no matching columns
cond.columns = ["a", "b", "c"]
result = df.where(cond)
expected = DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = [True]
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
df.where(cond)
expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
def test_where_bug(self):
# see gh-2793
df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [
4.0, 3.0, 2.0, 1.0]}, dtype='float64')
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [
4.0, 3.0, np.nan, np.nan]}, dtype='float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
def test_where_bug_mixed(self, sint_dtype):
# see gh-2793
df = DataFrame({"a": np.array([1, 2, 3, 4], dtype=sint_dtype),
"b": np.array([4.0, 3.0, 2.0, 1.0],
dtype="float64")})
expected = DataFrame({"a": [np.nan, np.nan, 3.0, 4.0],
"b": [4.0, 3.0, np.nan, np.nan]},
dtype="float64")
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
def test_where_bug_transposition(self):
# see gh-7506
a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
assert_frame_equal(result, expected)
a = DataFrame({0: [4, 6], 1: [1, 0]})
b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
do_not_replace = b.isna() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
assert_frame_equal(result, expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(dict(A=date_range('20130102', periods=5),
B=date_range('20130104', periods=5),
C=np.random.randn(5)))
stamp = datetime(2013, 1, 3)
with pytest.raises(TypeError):
df > stamp
result = df[df.iloc[:, :-1] > stamp]
expected = df.copy()
expected.loc[[0, 1], 'A'] = np.nan
expected.loc[:, 'C'] = np.nan
assert_frame_equal(result, expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({'series': Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame(
{'series': Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])})
assert_frame_equal(df, expected)
# GH 7656
df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {
'A': np.nan, 'B': 'Test', 'C': np.nan}])
msg = 'boolean setting on mixed-type'
with pytest.raises(TypeError, match=msg):
df.where(~isna(df), None, inplace=True)
def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self):
# see gh-21947
df = pd.DataFrame(columns=["a"])
cond = df.applymap(lambda x: x > 0)
result = df.where(cond)
tm.assert_frame_equal(result, df)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10, 3))
df.iloc[3:5, 0] = np.nan
df.iloc[4:6, 1] = np.nan
df.iloc[5:8, 2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notna(df), df.mean(), axis='columns')
assert_frame_equal(result, expected)
df.where(pd.notna(df), df.mean(), inplace=True, axis='columns')
assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
result = df.where(df > 0, df[0], axis='index')
assert_frame_equal(result, expected)
result = df.where(df > 0, df[0], axis='rows')
assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(pd.notna(df), DataFrame(
1, index=df.index, columns=df.columns))
assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame(
[[1 + 1j, 2], [np.nan, 4 + 1j]], columns=['a', 'b'])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df, expected)
def test_where_axis(self):
# GH 9736
df = DataFrame(np.random.randn(2, 2))
mask = DataFrame([[False, False], [False, False]])
s = Series([0, 1])
expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
result = df.where(mask, s, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='index', inplace=True)
assert_frame_equal(result, expected)
expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
result = df.where(mask, s, axis='columns')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='columns', inplace=True)
assert_frame_equal(result, expected)
# Upcast needed
df = DataFrame([[1, 2], [3, 4]], dtype='int64')
mask = DataFrame([[False, False], [False, False]])
s = Series([0, np.nan])
expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
result = df.where(mask, s, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='index', inplace=True)
assert_frame_equal(result, expected)
expected = DataFrame([[0, np.nan], [0, np.nan]])
result = df.where(mask, s, axis='columns')
assert_frame_equal(result, expected)
expected = DataFrame({0: np.array([0, 0], dtype='int64'),
1: np.array([np.nan, np.nan], dtype='float64')})
result = df.copy()
result.where(mask, s, axis='columns', inplace=True)
assert_frame_equal(result, expected)
# Multiple dtypes (=> multiple Blocks)
df = pd.concat([
DataFrame(np.random.randn(10, 2)),
DataFrame(np.random.randint(0, 10, size=(10, 2)), dtype='int64')],
ignore_index=True, axis=1)
mask = DataFrame(False, columns=df.columns, index=df.index)
s1 = Series(1, index=df.columns)
s2 = Series(2, index=df.index)
result = df.where(mask, s1, axis='columns')
expected = DataFrame(1.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype('int64')
expected[3] = expected[3].astype('int64')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s1, axis='columns', inplace=True)
assert_frame_equal(result, expected)
result = df.where(mask, s2, axis='index')
expected = DataFrame(2.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype('int64')
expected[3] = expected[3].astype('int64')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s2, axis='index', inplace=True)
assert_frame_equal(result, expected)
# DataFrame vs DataFrame
d1 = df.copy().drop(1, axis=0)
expected = df.copy()
expected.loc[1, :] = np.nan
result = df.where(mask, d1)
assert_frame_equal(result, expected)
result = df.where(mask, d1, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d1, inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d1, inplace=True, axis='index')
assert_frame_equal(result, expected)
d2 = df.copy().drop(1, axis=1)
expected = df.copy()
expected.loc[:, 1] = np.nan
result = df.where(mask, d2)
assert_frame_equal(result, expected)
result = df.where(mask, d2, axis='columns')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d2, inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d2, inplace=True, axis='columns')
assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_where_tz_values(self, tz_naive_fixture):
df1 = DataFrame(DatetimeIndex(['20150101', '20150102', '20150103'],
tz=tz_naive_fixture),
columns=['date'])
df2 = DataFrame(DatetimeIndex(['20150103', '20150104', '20150105'],
tz=tz_naive_fixture),
columns=['date'])
mask = DataFrame([True, True, False], columns=['date'])
exp = DataFrame(DatetimeIndex(['20150101', '20150102', '20150105'],
tz=tz_naive_fixture),
columns=['date'])
result = df1.where(mask, df2)
assert_frame_equal(exp, result)
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
# see gh-21891
df = DataFrame([1, 2])
res = df.mask([[True], [False]])
exp = DataFrame([np.nan, 2])
tm.assert_frame_equal(res, exp)
def test_mask_inplace(self):
# GH8801
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rdf = df.copy()
rdf.where(cond, inplace=True)
assert_frame_equal(rdf, df.where(cond))
assert_frame_equal(rdf, df.mask(~cond))
rdf = df.copy()
rdf.where(cond, -df, inplace=True)
assert_frame_equal(rdf, df.where(cond, -df))
assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[np.nan, 2]])
assert_frame_equal(res, expec)
def test_mask_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.mask(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df > 4, df + 1))
# return ndarray and scalar
result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))
# chain
result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).mask((df + 2) > 8, (df + 2) + 10))
def test_head_tail(self):
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
assert_frame_equal(self.frame.head(0), self.frame[0:0])
assert_frame_equal(self.frame.tail(0), self.frame[0:0])
assert_frame_equal(self.frame.head(-1), self.frame[:-1])
assert_frame_equal(self.frame.tail(-1), self.frame[1:])
assert_frame_equal(self.frame.head(1), self.frame[:1])
assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df[0:0])
assert_frame_equal(df.tail(0), df[0:0])
assert_frame_equal(df.head(-1), df.iloc[:-1])
assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
empty_df = DataFrame()
assert_frame_equal(empty_df.tail(), empty_df)
assert_frame_equal(empty_df.head(), empty_df)
def test_type_error_multiindex(self):
# See gh-12218
df = DataFrame(columns=['i', 'c', 'x', 'y'],
data=[[0, 0, 1, 2], [1, 0, 3, 4],
[0, 1, 1, 2], [1, 1, 3, 4]])
dg = df.pivot_table(index='i', columns='c',
values=['x', 'y'])
with pytest.raises(TypeError, match="is an invalid key"):
str(dg[:, 0])
index = Index(range(2), name='i')
columns = MultiIndex(levels=[['x', 'y'], [0, 1]],
codes=[[0, 1], [0, 0]],
names=[None, 'c'])
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
assert_frame_equal(result, expected)
name = ('x', 0)
index = Index(range(2), name='i')
expected = Series([1, 3], index=index, name=name)
result = dg['x', 0]
assert_series_equal(result, expected)
def test_interval_index(self):
# GH 19977
index = pd.interval_range(start=0, periods=3)
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=index,
columns=['A', 'B', 'C'])
expected = 1
result = df.loc[0.5, 'A']
assert_almost_equal(result, expected)
index = pd.interval_range(start=0, periods=3, closed='both')
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=index,
columns=['A', 'B', 'C'])
index_exp = pd.interval_range(start=0, periods=2,
freq=1, closed='both')
expected = pd.Series([1, 4], index=index_exp, name='A')
result = df.loc[1, 'A']
assert_series_equal(result, expected)
class TestDataFrameIndexingDatetimeWithTZ(TestData):
def setup_method(self, method):
self.idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
self.dr = date_range('20130110', periods=3)
self.df = DataFrame({'A': self.idx, 'B': self.dr})
def test_setitem(self):
df = self.df
idx = self.idx
# setitem
df['C'] = idx
assert_series_equal(df['C'], Series(idx, name='C'))
df['D'] = 'foo'
df['D'] = idx
assert_series_equal(df['D'], Series(idx, name='D'))
del df['D']
# assert that A & C are not sharing the same base (e.g. they
# are copies)
b1 = df._data.blocks[1]
b2 = df._data.blocks[2]
tm.assert_extension_array_equal(b1.values, b2.values)
assert id(b1.values._data.base) != id(b2.values._data.base)
# with nan
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2['B']
assert_series_equal(notna(result), Series(
[True, False, True], name='B'))
assert_series_equal(df2.dtypes, df.dtypes)
def test_set_reset(self):
idx = self.idx
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
assert result['foo'].dtype == 'datetime64[ns, US/Eastern]'
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
def test_transpose(self):
result = self.df.T
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
def test_scalar_assignment(self):
# issue #19843
df = pd.DataFrame(index=(0, 1, 2))
df['now'] = pd.Timestamp('20130101', tz='UTC')
expected = pd.DataFrame(
{'now': pd.Timestamp('20130101', tz='UTC')}, index=[0, 1, 2])
tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64(TestData):
def setup_method(self, method):
self.ir = Index(np.arange(3), dtype=np.uint64)
self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
self.df = DataFrame({'A': self.idx, 'B': self.ir})
def test_setitem(self):
df = self.df
idx = self.idx
# setitem
df['C'] = idx
assert_series_equal(df['C'], Series(idx, name='C'))
df['D'] = 'foo'
df['D'] = idx
assert_series_equal(df['D'], Series(idx, name='D'))
del df['D']
# With NaN: because uint64 has no NaN element,
# the column should be cast to object.
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2['B']
assert_series_equal(notna(result), Series(
[True, False, True], name='B'))
assert_series_equal(df2.dtypes, Series([np.dtype('uint64'),
np.dtype('O'), np.dtype('O')],
index=['A', 'B', 'C']))
def test_set_reset(self):
idx = self.idx
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
assert result['foo'].dtype == np.dtype('uint64')
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
def test_transpose(self):
result = self.df.T
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
class TestDataFrameIndexingCategorical(object):
def test_assignment(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = Categorical(["{0} - {1}".format(i, i + 499)
for i in range(0, 10000, 500)])
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), CategoricalDtype(categories=labels,
ordered=False)],
index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False)],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
tm.assert_categorical_equal(result1._data._block.values, d)
# sorting
s.name = 'E'
tm.assert_series_equal(result2.sort_index(), s.sort_index())
cat = Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = DataFrame(Series(cat))
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"],
categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"],
categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"],
categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = DataFrame({"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError):
df = orig.copy()
df.iloc[2, 0] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError):
df = orig.copy()
df.iloc[2, :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError):
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list('bb'), categories=list('abc'))
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = Categorical(list('cc'), categories=list('abc'))
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError):
df = orig.copy()
df.loc["j", "cats"] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", df.columns[0]] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError):
df = orig.copy()
df.loc["j", df.columns[0]] = "c"
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
with pytest.raises(ValueError):
df = orig.copy()
df.loc["j", :] = ["c", 2]
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
with pytest.raises(ValueError):
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["b", "b"], categories=["a", "b", "c"])
with pytest.raises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", df.columns[0]] = Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", df.columns[0]] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with pytest.raises(ValueError):
df.loc["j":"k", df.columns[0]] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError):
df = orig.copy()
df.iat[2, 0] = "c"
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
with pytest.raises(ValueError):
df = orig.copy()
df.at["j", "cats"] = "c"
# fancy indexing
catsf = Categorical(["a", "a", "c", "c", "a", "a", "a"],
categories=["a", "b", "c"])
idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
# set_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
with pytest.raises(ValueError):
df = orig.copy()
df.at["j", "cats"] = "c"
# Assigning a Categorical to parts of an int/... column uses the values of
# the Categorical
df = DataFrame({"a": [1, 1, 1, 1, 1], "b": list("aaaaa")})
exp = DataFrame({"a": [1, "b", "b", 1, 1], "b": list("aabba")})
df.loc[1:2, "a"] = Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
def test_functions_no_warnings(self):
df = DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
|
bsd-3-clause
|
rabernat/xray
|
xarray/tests/test_indexing.py
|
1
|
15372
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pytest
import numpy as np
import pandas as pd
from xarray import Dataset, DataArray, Variable
from xarray.core import indexing
from xarray.core import nputils
from xarray.core.pycompat import native_int_types
from . import TestCase, ReturnItem, raises_regex, IndexerMaker
B = IndexerMaker(indexing.BasicIndexer)
class TestIndexers(TestCase):
def set_to_zero(self, x, i):
x = x.copy()
x[i] = 0
return x
def test_expanded_indexer(self):
x = np.random.randn(10, 11, 12, 13, 14)
y = np.arange(5)
I = ReturnItem()
for i in [I[:], I[...], I[0, :, 10], I[..., 10], I[:5, ..., 0],
I[..., 0, :], I[y], I[y, y], I[..., y, y],
I[..., 0, 1, 2, 3, 4]]:
j = indexing.expanded_indexer(i, x.ndim)
self.assertArrayEqual(x[i], x[j])
self.assertArrayEqual(self.set_to_zero(x, i),
self.set_to_zero(x, j))
with raises_regex(IndexError, 'too many indices'):
indexing.expanded_indexer(I[1, 2, 3], 2)
def test_asarray_tuplesafe(self):
res = indexing._asarray_tuplesafe(('a', 1))
assert isinstance(res, np.ndarray)
assert res.ndim == 0
assert res.item() == ('a', 1)
res = indexing._asarray_tuplesafe([(0,), (1,)])
assert res.shape == (2,)
assert res[0] == (0,)
assert res[1] == (1,)
def test_convert_label_indexer(self):
# TODO: add tests that aren't just for edge cases
index = pd.Index([1, 2, 3])
with raises_regex(KeyError, 'not all values found'):
indexing.convert_label_indexer(index, [0])
with pytest.raises(KeyError):
indexing.convert_label_indexer(index, 0)
with raises_regex(ValueError, 'does not have a MultiIndex'):
indexing.convert_label_indexer(index, {'one': 0})
mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=('one', 'two'))
with raises_regex(KeyError, 'not all values found'):
indexing.convert_label_indexer(mindex, [0])
with pytest.raises(KeyError):
indexing.convert_label_indexer(mindex, 0)
with pytest.raises(ValueError):
indexing.convert_label_indexer(index, {'three': 0})
with pytest.raises((KeyError, IndexError)):
# pandas 0.21 changed this from KeyError to IndexError
indexing.convert_label_indexer(
mindex, (slice(None), 1, 'no_level'))
def test_convert_unsorted_datetime_index_raises(self):
index = pd.to_datetime(['2001', '2000', '2002'])
with pytest.raises(KeyError):
# pandas will try to convert this into an array indexer. We should
# raise instead, so we can be sure the result of indexing with a
# slice is always a view.
indexing.convert_label_indexer(index, slice('2001', '2002'))
def test_get_dim_indexers(self):
mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2]],
names=('one', 'two'))
mdata = DataArray(range(4), [('x', mindex)])
dim_indexers = indexing.get_dim_indexers(mdata, {'one': 'a', 'two': 1})
self.assertEqual(dim_indexers, {'x': {'one': 'a', 'two': 1}})
with raises_regex(ValueError, 'cannot combine'):
indexing.get_dim_indexers(mdata, {'x': 'a', 'two': 1})
with raises_regex(ValueError, 'do not exist'):
indexing.get_dim_indexers(mdata, {'y': 'a'})
with raises_regex(ValueError, 'do not exist'):
indexing.get_dim_indexers(mdata, {'four': 1})
def test_remap_label_indexers(self):
def test_indexer(data, x, expected_pos, expected_idx=None):
pos, idx = indexing.remap_label_indexers(data, {'x': x})
self.assertArrayEqual(pos.get('x'), expected_pos)
self.assertArrayEqual(idx.get('x'), expected_idx)
data = Dataset({'x': ('x', [1, 2, 3])})
mindex = pd.MultiIndex.from_product([['a', 'b'], [1, 2], [-1, -2]],
names=('one', 'two', 'three'))
mdata = DataArray(range(8), [('x', mindex)])
test_indexer(data, 1, 0)
test_indexer(data, np.int32(1), 0)
test_indexer(data, Variable([], 1), 0)
test_indexer(mdata, ('a', 1, -1), 0)
test_indexer(mdata, ('a', 1),
[True, True, False, False, False, False, False, False],
[-1, -2])
test_indexer(mdata, 'a', slice(0, 4, None),
pd.MultiIndex.from_product([[1, 2], [-1, -2]]))
test_indexer(mdata, ('a',),
[True, True, True, True, False, False, False, False],
pd.MultiIndex.from_product([[1, 2], [-1, -2]]))
test_indexer(mdata, [('a', 1, -1), ('b', 2, -2)], [0, 7])
test_indexer(mdata, slice('a', 'b'), slice(0, 8, None))
test_indexer(mdata, slice(('a', 1), ('b', 1)), slice(0, 6, None))
test_indexer(mdata, {'one': 'a', 'two': 1, 'three': -1}, 0)
test_indexer(mdata, {'one': 'a', 'two': 1},
[True, True, False, False, False, False, False, False],
[-1, -2])
test_indexer(mdata, {'one': 'a', 'three': -1},
[True, False, True, False, False, False, False, False],
[1, 2])
test_indexer(mdata, {'one': 'a'},
[True, True, True, True, False, False, False, False],
pd.MultiIndex.from_product([[1, 2], [-1, -2]]))
class TestLazyArray(TestCase):
def test_slice_slice(self):
I = ReturnItem()
x = np.arange(100)
slices = [I[:3], I[:4], I[2:4], I[:1], I[:-1], I[5:-1], I[-5:-1],
I[::-1], I[5::-1], I[:3:-1], I[:30:-1], I[10:4:], I[::4],
I[4:4:4], I[:4:-4]]
for i in slices:
for j in slices:
expected = x[i][j]
new_slice = indexing.slice_slice(i, j, size=100)
actual = x[new_slice]
self.assertArrayEqual(expected, actual)
def test_lazily_indexed_array(self):
original = np.random.rand(10, 20, 30)
x = indexing.NumpyIndexingAdapter(original)
v = Variable(['i', 'j', 'k'], original)
lazy = indexing.LazilyIndexedArray(x)
v_lazy = Variable(['i', 'j', 'k'], lazy)
I = ReturnItem()
# test orthogonally applied indexers
indexers = [I[:], 0, -2, I[:3], [0, 1, 2, 3], [0], np.arange(10) < 5]
for i in indexers:
for j in indexers:
for k in indexers:
if isinstance(j, np.ndarray) and j.dtype.kind == 'b':
j = np.arange(20) < 5
if isinstance(k, np.ndarray) and k.dtype.kind == 'b':
k = np.arange(30) < 5
expected = np.asarray(v[i, j, k])
for actual in [v_lazy[i, j, k],
v_lazy[:, j, k][i],
v_lazy[:, :, k][:, j][i]]:
self.assertEqual(expected.shape, actual.shape)
self.assertArrayEqual(expected, actual)
assert isinstance(actual._data,
indexing.LazilyIndexedArray)
# make sure actual.key is appropriate type
if all(isinstance(k, native_int_types + (slice, ))
for k in v_lazy._data.key.tuple):
assert isinstance(v_lazy._data.key,
indexing.BasicIndexer)
else:
assert isinstance(v_lazy._data.key,
indexing.OuterIndexer)
# test sequentially applied indexers
indexers = [(3, 2), (I[:], 0), (I[:2], -1), (I[:4], [0]), ([4, 5], 0),
([0, 1, 2], [0, 1]), ([0, 3, 5], I[:2])]
for i, j in indexers:
expected = np.asarray(v[i][j])
actual = v_lazy[i][j]
self.assertEqual(expected.shape, actual.shape)
self.assertArrayEqual(expected, actual)
assert isinstance(actual._data, indexing.LazilyIndexedArray)
assert isinstance(actual._data.array,
indexing.NumpyIndexingAdapter)
class TestCopyOnWriteArray(TestCase):
def test_setitem(self):
original = np.arange(10)
wrapped = indexing.CopyOnWriteArray(original)
wrapped[B[:]] = 0
self.assertArrayEqual(original, np.arange(10))
self.assertArrayEqual(wrapped, np.zeros(10))
def test_sub_array(self):
original = np.arange(10)
wrapped = indexing.CopyOnWriteArray(original)
child = wrapped[B[:5]]
self.assertIsInstance(child, indexing.CopyOnWriteArray)
child[B[:]] = 0
self.assertArrayEqual(original, np.arange(10))
self.assertArrayEqual(wrapped, np.arange(10))
self.assertArrayEqual(child, np.zeros(5))
def test_index_scalar(self):
# regression test for GH1374
x = indexing.CopyOnWriteArray(np.array(['foo', 'bar']))
assert np.array(x[B[0]][B[()]]) == 'foo'
class TestMemoryCachedArray(TestCase):
def test_wrapper(self):
original = indexing.LazilyIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
self.assertArrayEqual(wrapped, np.arange(10))
self.assertIsInstance(wrapped.array, indexing.NumpyIndexingAdapter)
def test_sub_array(self):
original = indexing.LazilyIndexedArray(np.arange(10))
wrapped = indexing.MemoryCachedArray(original)
child = wrapped[B[:5]]
self.assertIsInstance(child, indexing.MemoryCachedArray)
self.assertArrayEqual(child, np.arange(5))
self.assertIsInstance(child.array, indexing.NumpyIndexingAdapter)
self.assertIsInstance(wrapped.array, indexing.LazilyIndexedArray)
def test_setitem(self):
original = np.arange(10)
wrapped = indexing.MemoryCachedArray(original)
wrapped[B[:]] = 0
self.assertArrayEqual(original, np.zeros(10))
def test_index_scalar(self):
# regression test for GH1374
x = indexing.MemoryCachedArray(np.array(['foo', 'bar']))
assert np.array(x[B[0]][B[()]]) == 'foo'
def test_base_explicit_indexer():
with pytest.raises(TypeError):
indexing.ExplicitIndexer(())
class Subclass(indexing.ExplicitIndexer):
pass
value = Subclass((1, 2, 3))
assert value.tuple == (1, 2, 3)
assert repr(value) == 'Subclass((1, 2, 3))'
@pytest.mark.parametrize('indexer_cls', [indexing.BasicIndexer,
indexing.OuterIndexer,
indexing.VectorizedIndexer])
def test_invalid_for_all(indexer_cls):
with pytest.raises(TypeError):
indexer_cls(None)
with pytest.raises(TypeError):
indexer_cls(([],))
with pytest.raises(TypeError):
indexer_cls((None,))
with pytest.raises(TypeError):
indexer_cls(('foo',))
with pytest.raises(TypeError):
indexer_cls((1.0,))
with pytest.raises(TypeError):
indexer_cls((slice('foo'),))
with pytest.raises(TypeError):
indexer_cls((np.array(['foo']),))
def check_integer(indexer_cls):
value = indexer_cls((1, np.uint64(2),)).tuple
assert all(isinstance(v, int) for v in value)
assert value == (1, 2)
def check_slice(indexer_cls):
(value,) = indexer_cls((slice(1, None, np.int64(2)),)).tuple
assert value == slice(1, None, 2)
assert isinstance(value.step, native_int_types)
def check_array1d(indexer_cls):
(value,) = indexer_cls((np.arange(3, dtype=np.int32),)).tuple
assert value.dtype == np.int64
np.testing.assert_array_equal(value, [0, 1, 2])
def check_array2d(indexer_cls):
array = np.array([[1, 2], [3, 4]], dtype=np.int64)
(value,) = indexer_cls((array,)).tuple
assert value.dtype == np.int64
np.testing.assert_array_equal(value, array)
def test_basic_indexer():
check_integer(indexing.BasicIndexer)
check_slice(indexing.BasicIndexer)
with pytest.raises(TypeError):
check_array1d(indexing.BasicIndexer)
with pytest.raises(TypeError):
check_array2d(indexing.BasicIndexer)
def test_outer_indexer():
check_integer(indexing.OuterIndexer)
check_slice(indexing.OuterIndexer)
check_array1d(indexing.OuterIndexer)
with pytest.raises(TypeError):
check_array2d(indexing.OuterIndexer)
def test_vectorized_indexer():
with pytest.raises(TypeError):
check_integer(indexing.VectorizedIndexer)
check_slice(indexing.VectorizedIndexer)
check_array1d(indexing.VectorizedIndexer)
check_array2d(indexing.VectorizedIndexer)
def test_unwrap_explicit_indexer():
indexer = indexing.BasicIndexer((1, 2))
target = None
unwrapped = indexing.unwrap_explicit_indexer(
indexer, target, allow=indexing.BasicIndexer)
assert unwrapped == (1, 2)
with raises_regex(NotImplementedError, 'Load your data'):
indexing.unwrap_explicit_indexer(
indexer, target, allow=indexing.OuterIndexer)
with raises_regex(TypeError, 'unexpected key type'):
indexing.unwrap_explicit_indexer(
indexer.tuple, target, allow=indexing.OuterIndexer)
def test_implicit_indexing_adapter():
array = np.arange(10)
implicit = indexing.ImplicitToExplicitIndexingAdapter(
indexing.NumpyIndexingAdapter(array), indexing.BasicIndexer)
np.testing.assert_array_equal(array, np.asarray(implicit))
np.testing.assert_array_equal(array, implicit[:])
def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
def nonzero(x):
if isinstance(x, np.ndarray) and x.dtype.kind == 'b':
x = x.nonzero()[0]
return x
original = np.random.rand(10, 20, 30)
v = Variable(['i', 'j', 'k'], original)
I = ReturnItem()
# test orthogonally applied indexers
indexers = [I[:], 0, -2, I[:3], np.array([0, 1, 2, 3]), np.array([0]),
np.arange(10) < 5]
for i, j, k in itertools.product(indexers, repeat=3):
if isinstance(j, np.ndarray) and j.dtype.kind == 'b': # match size
j = np.arange(20) < 4
if isinstance(k, np.ndarray) and k.dtype.kind == 'b':
k = np.arange(30) < 8
_, expected, new_order = v._broadcast_indexes_vectorized((i, j, k))
expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple]
if new_order:
old_order = range(len(new_order))
expected_data = np.moveaxis(expected_data, old_order,
new_order)
outer_index = (nonzero(i), nonzero(j), nonzero(k))
actual = indexing._outer_to_numpy_indexer(outer_index, v.shape)
actual_data = v.data[actual]
np.testing.assert_array_equal(actual_data, expected_data)
|
apache-2.0
|
procoder317/scikit-learn
|
examples/plot_multilabel.py
|
236
|
4157
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
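# decision boundary: w[0] * x + w[1] * y + intercept_ = 0, hence
# y = -(w[0] / w[1]) * x - intercept_ / w[1], which is what `a` and `yy` compute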
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
bsd-3-clause
|
ilo10/scikit-learn
|
examples/svm/plot_custom_kernel.py
|
115
|
1546
|
"""
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
(2 0)
k(x, y) = x ( ) y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
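# Editor's note (hypothetical check, not in the original example): this kernel
# is simply a dot product in a rescaled feature space, since
#     x @ diag(2, 1) @ y.T == (x * sqrt([2, 1])) @ (y * sqrt([2, 1])).T
# e.g. np.allclose(my_kernel(X, X),
#                  (X * np.sqrt([2., 1.])) @ (X * np.sqrt([2., 1.])).T) holds.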
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
chrisburr/scikit-learn
|
examples/neighbors/plot_species_kde.py
|
282
|
4059
|
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
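# Editor's sketch (hypothetical helper, not part of the original example): the
# core pattern used below -- fit a Gaussian KDE under the haversine metric on
# (lat, lon) converted to radians, then exponentiate the log-density returned
# by score_samples.
def _haversine_kde_density(latlon_deg, bandwidth=0.04):
    X = latlon_deg * np.pi / 180.  # the haversine metric expects radians
    kde = KernelDensity(bandwidth=bandwidth, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(X)
    return np.exp(kde.score_samples(X))  # density at the training points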
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
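# score_samples returns the log of the density, so exponentiate it below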
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
|
bsd-3-clause
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/tests/reshape/test_pivot.py
|
1
|
87636
|
from collections import OrderedDict
from datetime import date, datetime, timedelta
from itertools import product
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Grouper,
Index,
MultiIndex,
Series,
concat,
date_range,
)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.pivot import crosstab, pivot_table
import pandas.util.testing as tm
@pytest.fixture(params=[True, False])
def dropna(request):
return request.param
@pytest.fixture(params=[([0] * 4, [1] * 4), (range(0, 3), range(1, 4))])
def interval_values(request, closed):
left, right = request.param
return Categorical(pd.IntervalIndex.from_arrays(left, right, closed))
class TestPivotTable:
def setup_method(self, method):
self.data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
def test_pivot_table(self, observed):
index = ["A", "B"]
columns = "C"
table = pivot_table(
self.data, values="D", index=index, columns=columns, observed=observed
)
table2 = self.data.pivot_table(
values="D", index=index, columns=columns, observed=observed
)
tm.assert_frame_equal(table, table2)
# this works
pivot_table(self.data, values="D", index=index, observed=observed)
if len(index) > 1:
assert table.index.names == tuple(index)
else:
assert table.index.name == index[0]
if len(columns) > 1:
assert table.columns.names == columns
else:
assert table.columns.name == columns[0]
expected = self.data.groupby(index + [columns])["D"].agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_table_categorical_observed_equal(self, observed):
# issue #24923
df = pd.DataFrame(
{"col1": list("abcde"), "col2": list("fghij"), "col3": [1, 2, 3, 4, 5]}
)
expected = df.pivot_table(
index="col1", values="col3", columns="col2", aggfunc=np.sum, fill_value=0
)
expected.index = expected.index.astype("category")
expected.columns = expected.columns.astype("category")
df.col1 = df.col1.astype("category")
df.col2 = df.col2.astype("category")
result = df.pivot_table(
index="col1",
values="col3",
columns="col2",
aggfunc=np.sum,
fill_value=0,
observed=observed,
)
tm.assert_frame_equal(result, expected)
def test_pivot_table_nocols(self):
df = DataFrame(
{"rows": ["a", "b", "c"], "cols": ["x", "y", "z"], "values": [1, 2, 3]}
)
rs = df.pivot_table(columns="cols", aggfunc=np.sum)
xp = df.pivot_table(index="cols", aggfunc=np.sum).T
tm.assert_frame_equal(rs, xp)
rs = df.pivot_table(columns="cols", aggfunc={"values": "mean"})
xp = df.pivot_table(index="cols", aggfunc={"values": "mean"}).T
tm.assert_frame_equal(rs, xp)
def test_pivot_table_dropna(self):
df = DataFrame(
{
"amount": {0: 60000, 1: 100000, 2: 50000, 3: 30000},
"customer": {0: "A", 1: "A", 2: "B", 3: "C"},
"month": {0: 201307, 1: 201309, 2: 201308, 3: 201310},
"product": {0: "a", 1: "b", 2: "c", 3: "d"},
"quantity": {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000},
}
)
pv_col = df.pivot_table(
"quantity", "month", ["customer", "product"], dropna=False
)
pv_ind = df.pivot_table(
"quantity", ["customer", "product"], "month", dropna=False
)
m = MultiIndex.from_tuples(
[
("A", "a"),
("A", "b"),
("A", "c"),
("A", "d"),
("B", "a"),
("B", "b"),
("B", "c"),
("B", "d"),
("C", "a"),
("C", "b"),
("C", "c"),
("C", "d"),
],
names=["customer", "product"],
)
tm.assert_index_equal(pv_col.columns, m)
tm.assert_index_equal(pv_ind.index, m)
def test_pivot_table_categorical(self):
cat1 = Categorical(
["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True
)
cat2 = Categorical(
["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True
)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values="values", index=["A", "B"], dropna=True)
exp_index = pd.MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
tm.assert_frame_equal(result, expected)
def test_pivot_table_dropna_categoricals(self, dropna):
# GH 15193
categories = ["a", "b", "c", "d"]
df = DataFrame(
{
"A": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
"B": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"C": range(0, 9),
}
)
df["A"] = df["A"].astype(CDT(categories, ordered=False))
result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna)
expected_columns = Series(["a", "b", "c"], name="A")
expected_columns = expected_columns.astype(CDT(categories, ordered=False))
expected_index = Series([1, 2, 3], name="B")
expected = DataFrame(
[[0, 3, 6], [1, 4, 7], [2, 5, 8]],
index=expected_index,
columns=expected_columns,
)
if not dropna:
# add back the non observed to compare
expected = expected.reindex(columns=Categorical(categories)).astype("float")
tm.assert_frame_equal(result, expected)
def test_pivot_with_non_observable_dropna(self, dropna):
# gh-21133
df = pd.DataFrame(
{
"A": pd.Categorical(
[np.nan, "low", "high", "low", "high"],
categories=["low", "high"],
ordered=True,
),
"B": range(5),
}
)
result = df.pivot_table(index="A", values="B", dropna=dropna)
expected = pd.DataFrame(
{"B": [2, 3]},
index=pd.Index(
pd.Categorical.from_codes(
[0, 1], categories=["low", "high"], ordered=True
),
name="A",
),
)
tm.assert_frame_equal(result, expected)
# gh-21378
df = pd.DataFrame(
{
"A": pd.Categorical(
["left", "low", "high", "low", "high"],
categories=["low", "high", "left"],
ordered=True,
),
"B": range(5),
}
)
result = df.pivot_table(index="A", values="B", dropna=dropna)
expected = pd.DataFrame(
{"B": [2, 3, 0]},
index=pd.Index(
pd.Categorical.from_codes(
[0, 1, 2], categories=["low", "high", "left"], ordered=True
),
name="A",
),
)
tm.assert_frame_equal(result, expected)
def test_pivot_with_interval_index(self, interval_values, dropna):
# GH 25814
df = DataFrame({"A": interval_values, "B": 1})
result = df.pivot_table(index="A", values="B", dropna=dropna)
expected = DataFrame({"B": 1}, index=Index(interval_values.unique(), name="A"))
tm.assert_frame_equal(result, expected)
def test_pivot_with_interval_index_margins(self):
# GH 25815
ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2])
df = DataFrame(
{
"A": np.arange(4, 0, -1, dtype=np.intp),
"B": ["a", "b", "a", "b"],
"C": pd.Categorical(ordered_cat, ordered=True).sort_values(
ascending=False
),
}
)
pivot_tab = pd.pivot_table(
df, index="C", columns="B", values="A", aggfunc="sum", margins=True
)
result = pivot_tab["All"]
expected = Series(
[3, 7, 10],
index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"),
name="All",
dtype=np.intp,
)
tm.assert_series_equal(result, expected)
def test_pass_array(self):
result = self.data.pivot_table("D", index=self.data.A, columns=self.data.C)
expected = self.data.pivot_table("D", index="A", columns="C")
tm.assert_frame_equal(result, expected)
def test_pass_function(self):
result = self.data.pivot_table("D", index=lambda x: x // 5, columns=self.data.C)
expected = self.data.pivot_table("D", index=self.data.index // 5, columns="C")
tm.assert_frame_equal(result, expected)
def test_pivot_table_multiple(self):
index = ["A", "B"]
columns = "C"
table = pivot_table(self.data, index=index, columns=columns)
expected = self.data.groupby(index + [columns]).agg(np.mean).unstack()
tm.assert_frame_equal(table, expected)
def test_pivot_dtypes(self):
# can convert dtypes
f = DataFrame(
{
"a": ["cat", "bat", "cat", "bat"],
"v": [1, 2, 3, 4],
"i": ["a", "b", "a", "b"],
}
)
assert f.dtypes["v"] == "int64"
z = pivot_table(
f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc=np.sum
)
result = z.dtypes
expected = Series([np.dtype("int64")] * 2, index=Index(list("ab"), name="i"))
tm.assert_series_equal(result, expected)
# cannot convert dtypes
f = DataFrame(
{
"a": ["cat", "bat", "cat", "bat"],
"v": [1.5, 2.5, 3.5, 4.5],
"i": ["a", "b", "a", "b"],
}
)
assert f.dtypes["v"] == "float64"
z = pivot_table(
f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc=np.mean
)
result = z.dtypes
expected = Series([np.dtype("float64")] * 2, index=Index(list("ab"), name="i"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"columns,values",
[
("bool1", ["float1", "float2"]),
("bool1", ["float1", "float2", "bool1"]),
("bool2", ["float1", "float2", "bool1"]),
],
)
def test_pivot_preserve_dtypes(self, columns, values):
# GH 7142 regression test
v = np.arange(5, dtype=np.float64)
df = DataFrame(
{"float1": v, "float2": v + 2.0, "bool1": v <= 2, "bool2": v <= 3}
)
df_res = df.reset_index().pivot_table(
index="index", columns=columns, values=values
)
result = dict(df_res.dtypes)
expected = {
col: np.dtype("O") if col[0].startswith("b") else np.dtype("float64")
for col in df_res
}
assert result == expected
def test_pivot_no_values(self):
# GH 14380
idx = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-01-02", "2011-01-01", "2011-01-02"]
)
df = pd.DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx)
res = df.pivot_table(index=df.index.month, columns=df.index.day)
exp_columns = pd.MultiIndex.from_tuples([("A", 1), ("A", 2)])
exp = pd.DataFrame(
[[2.5, 4.0], [2.0, np.nan]], index=[1, 2], columns=exp_columns
)
tm.assert_frame_equal(res, exp)
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5],
"dt": pd.date_range("2011-01-01", freq="D", periods=5),
},
index=idx,
)
res = df.pivot_table(
index=df.index.month, columns=pd.Grouper(key="dt", freq="M")
)
exp_columns = pd.MultiIndex.from_tuples([("A", pd.Timestamp("2011-01-31"))])
exp_columns.names = [None, "dt"]
exp = pd.DataFrame([3.25, 2.0], index=[1, 2], columns=exp_columns)
tm.assert_frame_equal(res, exp)
res = df.pivot_table(
index=pd.Grouper(freq="A"), columns=pd.Grouper(key="dt", freq="M")
)
exp = pd.DataFrame(
[3], index=pd.DatetimeIndex(["2011-12-31"]), columns=exp_columns
)
tm.assert_frame_equal(res, exp)
def test_pivot_multi_values(self):
result = pivot_table(
self.data, values=["D", "E"], index="A", columns=["B", "C"], fill_value=0
)
expected = pivot_table(
self.data.drop(["F"], axis=1), index="A", columns=["B", "C"], fill_value=0
)
tm.assert_frame_equal(result, expected)
def test_pivot_multi_functions(self):
f = lambda func: pivot_table(
self.data, values=["D", "E"], index=["A", "B"], columns="C", aggfunc=func
)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=["mean", "std"], axis=1)
tm.assert_frame_equal(result, expected)
# margins not supported??
f = lambda func: pivot_table(
self.data,
values=["D", "E"],
index=["A", "B"],
columns="C",
aggfunc=func,
margins=True,
)
result = f([np.mean, np.std])
means = f(np.mean)
stds = f(np.std)
expected = concat([means, stds], keys=["mean", "std"], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_index_with_nan(self, method):
# GH 3588
nan = np.nan
df = DataFrame(
{
"a": ["R1", "R2", nan, "R4"],
"b": ["C1", "C2", "C3", "C4"],
"c": [10, 15, 17, 20],
}
)
if method:
result = df.pivot("a", "b", "c")
else:
result = pd.pivot(df, "a", "b", "c")
expected = DataFrame(
[
[nan, nan, 17, nan],
[10, nan, nan, nan],
[nan, 15, nan, nan],
[nan, nan, nan, 20],
],
index=Index([nan, "R1", "R2", "R4"], name="a"),
columns=Index(["C1", "C2", "C3", "C4"], name="b"),
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df.pivot("b", "a", "c"), expected.T)
# GH9491
df = DataFrame(
{
"a": pd.date_range("2014-02-01", periods=6, freq="D"),
"c": 100 + np.arange(6),
}
)
df["b"] = df["a"] - pd.Timestamp("2014-02-02")
df.loc[1, "a"] = df.loc[3, "a"] = nan
df.loc[1, "b"] = df.loc[4, "b"] = nan
if method:
pv = df.pivot("a", "b", "c")
else:
pv = pd.pivot(df, "a", "b", "c")
assert pv.notna().values.sum() == len(df)
for _, row in df.iterrows():
assert pv.loc[row["a"], row["b"]] == row["c"]
if method:
result = df.pivot("b", "a", "c")
else:
result = pd.pivot(df, "b", "a", "c")
tm.assert_frame_equal(result, pv.T)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_with_tz(self, method):
# GH 5878
df = DataFrame(
{
"dt1": [
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
datetime(2013, 1, 1, 9, 0),
datetime(2013, 1, 2, 9, 0),
],
"dt2": [
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 1, 9, 0),
datetime(2014, 1, 2, 9, 0),
datetime(2014, 1, 2, 9, 0),
],
"data1": np.arange(4, dtype="int64"),
"data2": np.arange(4, dtype="int64"),
}
)
df["dt1"] = df["dt1"].apply(lambda d: pd.Timestamp(d, tz="US/Pacific"))
df["dt2"] = df["dt2"].apply(lambda d: pd.Timestamp(d, tz="Asia/Tokyo"))
exp_col1 = Index(["data1", "data1", "data2", "data2"])
exp_col2 = pd.DatetimeIndex(
["2014/01/01 09:00", "2014/01/02 09:00"] * 2, name="dt2", tz="Asia/Tokyo"
)
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame(
[[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.DatetimeIndex(
["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific"
),
columns=exp_col,
)
if method:
pv = df.pivot(index="dt1", columns="dt2")
else:
pv = pd.pivot(df, index="dt1", columns="dt2")
tm.assert_frame_equal(pv, expected)
expected = DataFrame(
[[0, 2], [1, 3]],
index=pd.DatetimeIndex(
["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific"
),
columns=pd.DatetimeIndex(
["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo"
),
)
if method:
pv = df.pivot(index="dt1", columns="dt2", values="data1")
else:
pv = pd.pivot(df, index="dt1", columns="dt2", values="data1")
tm.assert_frame_equal(pv, expected)
def test_pivot_tz_in_values(self):
# GH 14948
df = pd.DataFrame(
[
{
"uid": u"aa",
"ts": pd.Timestamp("2016-08-12 13:00:00-0700", tz="US/Pacific"),
},
{
"uid": u"aa",
"ts": pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"),
},
{
"uid": u"aa",
"ts": pd.Timestamp("2016-08-12 14:00:00-0700", tz="US/Pacific"),
},
{
"uid": u"aa",
"ts": pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"),
},
{
"uid": u"aa",
"ts": pd.Timestamp("2016-08-25 13:00:00-0700", tz="US/Pacific"),
},
]
)
df = df.set_index("ts").reset_index()
mins = df.ts.map(lambda x: x.replace(hour=0, minute=0, second=0, microsecond=0))
result = pd.pivot_table(
df.set_index("ts").reset_index(),
values="ts",
index=["uid"],
columns=[mins],
aggfunc=np.min,
)
expected = pd.DataFrame(
[
[
pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"),
pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"),
]
],
index=pd.Index(["aa"], name="uid"),
columns=pd.DatetimeIndex(
[
pd.Timestamp("2016-08-12 00:00:00", tz="US/Pacific"),
pd.Timestamp("2016-08-25 00:00:00", tz="US/Pacific"),
],
name="ts",
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_periods(self, method):
df = DataFrame(
{
"p1": [
pd.Period("2013-01-01", "D"),
pd.Period("2013-01-02", "D"),
pd.Period("2013-01-01", "D"),
pd.Period("2013-01-02", "D"),
],
"p2": [
pd.Period("2013-01", "M"),
pd.Period("2013-01", "M"),
pd.Period("2013-02", "M"),
pd.Period("2013-02", "M"),
],
"data1": np.arange(4, dtype="int64"),
"data2": np.arange(4, dtype="int64"),
}
)
exp_col1 = Index(["data1", "data1", "data2", "data2"])
exp_col2 = pd.PeriodIndex(["2013-01", "2013-02"] * 2, name="p2", freq="M")
exp_col = pd.MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame(
[[0, 2, 0, 2], [1, 3, 1, 3]],
index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"),
columns=exp_col,
)
if method:
pv = df.pivot(index="p1", columns="p2")
else:
pv = pd.pivot(df, index="p1", columns="p2")
tm.assert_frame_equal(pv, expected)
expected = DataFrame(
[[0, 2], [1, 3]],
index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"),
columns=pd.PeriodIndex(["2013-01", "2013-02"], name="p2", freq="M"),
)
if method:
pv = df.pivot(index="p1", columns="p2", values="data1")
else:
pv = pd.pivot(df, index="p1", columns="p2", values="data1")
tm.assert_frame_equal(pv, expected)
@pytest.mark.parametrize(
"values",
[
["baz", "zoo"],
np.array(["baz", "zoo"]),
pd.Series(["baz", "zoo"]),
pd.Index(["baz", "zoo"]),
],
)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_with_list_like_values(self, values, method):
# issue #17160
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
if method:
result = df.pivot(index="foo", columns="bar", values=values)
else:
result = pd.pivot(df, index="foo", columns="bar", values=values)
data = [[1, 2, 3, "x", "y", "z"], [4, 5, 6, "q", "w", "t"]]
index = Index(data=["one", "two"], name="foo")
columns = MultiIndex(
levels=[["baz", "zoo"], ["A", "B", "C"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
names=[None, "bar"],
)
expected = DataFrame(data=data, index=index, columns=columns, dtype="object")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
["bar", "baz"],
np.array(["bar", "baz"]),
pd.Series(["bar", "baz"]),
pd.Index(["bar", "baz"]),
],
)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_with_list_like_values_nans(self, values, method):
# issue #17160
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
if method:
result = df.pivot(index="zoo", columns="foo", values=values)
else:
result = pd.pivot(df, index="zoo", columns="foo", values=values)
data = [
[np.nan, "A", np.nan, 4],
[np.nan, "C", np.nan, 6],
[np.nan, "B", np.nan, 5],
["A", np.nan, 1, np.nan],
["B", np.nan, 2, np.nan],
["C", np.nan, 3, np.nan],
]
index = Index(data=["q", "t", "w", "x", "y", "z"], name="zoo")
columns = MultiIndex(
levels=[["bar", "baz"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[None, "foo"],
)
expected = DataFrame(data=data, index=index, columns=columns, dtype="object")
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(
reason="MultiIndexed unstack with tuple names fails with KeyError GH#19966"
)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_with_multiindex(self, method):
# issue #17160
index = Index(data=[0, 1, 2, 3, 4, 5])
data = [
["one", "A", 1, "x"],
["one", "B", 2, "y"],
["one", "C", 3, "z"],
["two", "A", 4, "q"],
["two", "B", 5, "w"],
["two", "C", 6, "t"],
]
columns = MultiIndex(
levels=[["bar", "baz"], ["first", "second"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
)
df = DataFrame(data=data, index=index, columns=columns, dtype="object")
if method:
result = df.pivot(
index=("bar", "first"),
columns=("bar", "second"),
values=("baz", "first"),
)
else:
result = pd.pivot(
df,
index=("bar", "first"),
columns=("bar", "second"),
values=("baz", "first"),
)
data = {
"A": Series([1, 4], index=["one", "two"]),
"B": Series([2, 5], index=["one", "two"]),
"C": Series([3, 6], index=["one", "two"]),
}
expected = DataFrame(data)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", [True, False])
def test_pivot_with_tuple_of_values(self, method):
# issue #17160
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.raises(KeyError, match=r"^\('bar', 'baz'\)$"):
# tuple is seen as a single column name
if method:
df.pivot(index="zoo", columns="foo", values=("bar", "baz"))
else:
pd.pivot(df, index="zoo", columns="foo", values=("bar", "baz"))
def test_margins(self):
def _check_output(
result, values_col, index=["A", "B"], columns=["C"], margins_col="All"
):
col_margins = result.loc[result.index[:-1], margins_col]
expected_col_margins = self.data.groupby(index)[values_col].mean()
tm.assert_series_equal(col_margins, expected_col_margins, check_names=False)
assert col_margins.name == margins_col
result = result.sort_index()
index_margins = result.loc[(margins_col, "")].iloc[:-1]
expected_ix_margins = self.data.groupby(columns)[values_col].mean()
tm.assert_series_equal(
index_margins, expected_ix_margins, check_names=False
)
assert index_margins.name == (margins_col, "")
grand_total_margins = result.loc[(margins_col, ""), margins_col]
expected_total_margins = self.data[values_col].mean()
assert grand_total_margins == expected_total_margins
# column specified
result = self.data.pivot_table(
values="D", index=["A", "B"], columns="C", margins=True, aggfunc=np.mean
)
_check_output(result, "D")
# Set a different margins_name (not 'All')
result = self.data.pivot_table(
values="D",
index=["A", "B"],
columns="C",
margins=True,
aggfunc=np.mean,
margins_name="Totals",
)
_check_output(result, "D", margins_col="Totals")
# no column specified
table = self.data.pivot_table(
index=["A", "B"], columns="C", margins=True, aggfunc=np.mean
)
for value_col in table.columns.levels[0]:
_check_output(table[value_col], value_col)
# no col
# to help with a buglet
self.data.columns = [k * 2 for k in self.data.columns]
table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc=np.mean)
for value_col in table.columns:
totals = table.loc[("All", ""), value_col]
assert totals == self.data[value_col].mean()
# no rows
rtable = self.data.pivot_table(
columns=["AA", "BB"], margins=True, aggfunc=np.mean
)
assert isinstance(rtable, Series)
table = self.data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean")
for item in ["DD", "EE", "FF"]:
totals = table.loc[("All", ""), item]
assert totals == self.data[item].mean()
def test_margins_dtype(self):
# GH 17013
df = self.data.copy()
df[["D", "E", "F"]] = np.arange(len(df) * 3).reshape(len(df), 3)
mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")]
mi = MultiIndex.from_tuples(mi_val, names=("A", "B"))
expected = DataFrame(
{"dull": [12, 21, 3, 9, 45], "shiny": [33, 0, 36, 51, 120]}, index=mi
).rename_axis("C", axis=1)
expected["All"] = expected["dull"] + expected["shiny"]
result = df.pivot_table(
values="D",
index=["A", "B"],
columns="C",
margins=True,
aggfunc=np.sum,
fill_value=0,
)
tm.assert_frame_equal(expected, result)
@pytest.mark.xfail(reason="GH#17035 (len of floats is casted back to floats)")
def test_margins_dtype_len(self):
mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")]
mi = MultiIndex.from_tuples(mi_val, names=("A", "B"))
expected = DataFrame(
{"dull": [1, 1, 2, 1, 5], "shiny": [2, 0, 2, 2, 6]}, index=mi
).rename_axis("C", axis=1)
expected["All"] = expected["dull"] + expected["shiny"]
result = self.data.pivot_table(
values="D",
index=["A", "B"],
columns="C",
margins=True,
aggfunc=len,
fill_value=0,
)
tm.assert_frame_equal(expected, result)
def test_pivot_integer_columns(self):
# caused by upstream bug in unstack
d = date.min
data = list(
product(
["foo", "bar"],
["A", "B", "C"],
["x1", "x2"],
[d + timedelta(i) for i in range(20)],
[1.0],
)
)
df = DataFrame(data)
table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2])
df2 = df.rename(columns=str)
table2 = df2.pivot_table(values="4", index=["0", "1", "3"], columns=["2"])
tm.assert_frame_equal(table, table2, check_names=False)
def test_pivot_no_level_overlap(self):
# GH #1181
data = DataFrame(
{
"a": ["a", "a", "a", "a", "b", "b", "b", "b"] * 2,
"b": [0, 0, 0, 0, 1, 1, 1, 1] * 2,
"c": (["foo"] * 4 + ["bar"] * 4) * 2,
"value": np.random.randn(16),
}
)
table = data.pivot_table("value", index="a", columns=["b", "c"])
grouped = data.groupby(["a", "b", "c"])["value"].mean()
expected = grouped.unstack("b").unstack("c").dropna(axis=1, how="all")
tm.assert_frame_equal(table, expected)
def test_pivot_columns_lexsorted(self):
n = 10000
dtype = np.dtype(
[
("Index", object),
("Symbol", object),
("Year", int),
("Month", int),
("Day", int),
("Quantity", int),
("Price", float),
]
)
products = np.array(
[
("SP500", "ADBE"),
("SP500", "NVDA"),
("SP500", "ORCL"),
("NDQ100", "AAPL"),
("NDQ100", "MSFT"),
("NDQ100", "GOOG"),
("FTSE", "DGE.L"),
("FTSE", "TSCO.L"),
("FTSE", "GSK.L"),
],
dtype=[("Index", object), ("Symbol", object)],
)
items = np.empty(n, dtype=dtype)
iproduct = np.random.randint(0, len(products), n)
items["Index"] = products["Index"][iproduct]
items["Symbol"] = products["Symbol"][iproduct]
dr = pd.date_range(date(2000, 1, 1), date(2010, 12, 31))
dates = dr[np.random.randint(0, len(dr), n)]
items["Year"] = dates.year
items["Month"] = dates.month
items["Day"] = dates.day
items["Price"] = np.random.lognormal(4.0, 2.0, n)
df = DataFrame(items)
pivoted = df.pivot_table(
"Price",
index=["Month", "Day"],
columns=["Index", "Symbol", "Year"],
aggfunc="mean",
)
assert pivoted.columns.is_monotonic
def test_pivot_complex_aggfunc(self):
f = OrderedDict([("D", ["std"]), ("E", ["sum"])])
expected = self.data.groupby(["A", "B"]).agg(f).unstack("B")
result = self.data.pivot_table(index="A", columns="B", aggfunc=f)
tm.assert_frame_equal(result, expected)
def test_margins_no_values_no_cols(self):
# Regression test on pivot table: no values or cols passed.
result = self.data[["A", "B"]].pivot_table(
index=["A", "B"], aggfunc=len, margins=True
)
result_list = result.tolist()
assert sum(result_list[:-1]) == result_list[-1]
def test_margins_no_values_two_rows(self):
# Regression test on pivot table: no values passed but rows are a
# multi-index
result = self.data[["A", "B", "C"]].pivot_table(
index=["A", "B"], columns="C", aggfunc=len, margins=True
)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
def test_margins_no_values_one_row_one_col(self):
# Regression test on pivot table: no values passed but row and col
# defined
result = self.data[["A", "B"]].pivot_table(
index="A", columns="B", aggfunc=len, margins=True
)
assert result.All.tolist() == [4.0, 7.0, 11.0]
def test_margins_no_values_two_row_two_cols(self):
# Regression test on pivot table: no values passed but rows and cols
# are multi-indexed
self.data["D"] = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
result = self.data[["A", "B", "C", "D"]].pivot_table(
index=["A", "B"], columns=["C", "D"], aggfunc=len, margins=True
)
assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0]
@pytest.mark.parametrize("margin_name", ["foo", "one", 666, None, ["a", "b"]])
def test_pivot_table_with_margins_set_margin_name(self, margin_name):
# see gh-3335
msg = (
r'Conflicting name "{}" in margins|'
"margins_name argument must be a string"
).format(margin_name)
with pytest.raises(ValueError, match=msg):
# multi-index index
pivot_table(
self.data,
values="D",
index=["A", "B"],
columns=["C"],
margins=True,
margins_name=margin_name,
)
with pytest.raises(ValueError, match=msg):
# multi-index column
pivot_table(
self.data,
values="D",
index=["C"],
columns=["A", "B"],
margins=True,
margins_name=margin_name,
)
with pytest.raises(ValueError, match=msg):
# non-multi-index index/column
pivot_table(
self.data,
values="D",
index=["A"],
columns=["B"],
margins=True,
margins_name=margin_name,
)
def test_pivot_timegrouper(self):
df = DataFrame(
{
"Branch": "A A A A A A A B".split(),
"Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
"Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
"Date": [
datetime(2013, 1, 1),
datetime(2013, 1, 1),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 10, 1),
datetime(2013, 10, 2),
datetime(2013, 12, 2),
datetime(2013, 12, 2),
],
}
).set_index("Date")
expected = DataFrame(
np.array([10, 18, 3], dtype="int64").reshape(1, 3),
index=[datetime(2013, 12, 31)],
columns="Carl Joe Mark".split(),
)
expected.index.name = "Date"
expected.columns.name = "Buyer"
result = pivot_table(
df,
index=Grouper(freq="A"),
columns="Buyer",
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index="Buyer",
columns=Grouper(freq="A"),
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected.T)
expected = DataFrame(
np.array([1, np.nan, 3, 9, 18, np.nan]).reshape(2, 3),
index=[datetime(2013, 1, 1), datetime(2013, 7, 1)],
columns="Carl Joe Mark".split(),
)
expected.index.name = "Date"
expected.columns.name = "Buyer"
result = pivot_table(
df,
index=Grouper(freq="6MS"),
columns="Buyer",
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index="Buyer",
columns=Grouper(freq="6MS"),
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected.T)
# passing the name
df = df.reset_index()
result = pivot_table(
df,
index=Grouper(freq="6MS", key="Date"),
columns="Buyer",
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index="Buyer",
columns=Grouper(freq="6MS", key="Date"),
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected.T)
msg = "'The grouper name foo is not found'"
with pytest.raises(KeyError, match=msg):
pivot_table(
df,
index=Grouper(freq="6MS", key="foo"),
columns="Buyer",
values="Quantity",
aggfunc=np.sum,
)
with pytest.raises(KeyError, match=msg):
pivot_table(
df,
index="Buyer",
columns=Grouper(freq="6MS", key="foo"),
values="Quantity",
aggfunc=np.sum,
)
# passing the level
df = df.set_index("Date")
result = pivot_table(
df,
index=Grouper(freq="6MS", level="Date"),
columns="Buyer",
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index="Buyer",
columns=Grouper(freq="6MS", level="Date"),
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected.T)
msg = "The level foo is not valid"
with pytest.raises(ValueError, match=msg):
pivot_table(
df,
index=Grouper(freq="6MS", level="foo"),
columns="Buyer",
values="Quantity",
aggfunc=np.sum,
)
with pytest.raises(ValueError, match=msg):
pivot_table(
df,
index="Buyer",
columns=Grouper(freq="6MS", level="foo"),
values="Quantity",
aggfunc=np.sum,
)
# double grouper
df = DataFrame(
{
"Branch": "A A A A A A A B".split(),
"Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(),
"Quantity": [1, 3, 5, 1, 8, 1, 9, 3],
"Date": [
datetime(2013, 11, 1, 13, 0),
datetime(2013, 9, 1, 13, 5),
datetime(2013, 10, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 11, 1, 20, 0),
datetime(2013, 10, 2, 10, 0),
datetime(2013, 10, 2, 12, 0),
datetime(2013, 12, 5, 14, 0),
],
"PayDay": [
datetime(2013, 10, 4, 0, 0),
datetime(2013, 10, 15, 13, 5),
datetime(2013, 9, 5, 20, 0),
datetime(2013, 11, 2, 10, 0),
datetime(2013, 10, 7, 20, 0),
datetime(2013, 9, 5, 10, 0),
datetime(2013, 12, 30, 12, 0),
datetime(2013, 11, 20, 14, 0),
],
}
)
result = pivot_table(
df,
index=Grouper(freq="M", key="Date"),
columns=Grouper(freq="M", key="PayDay"),
values="Quantity",
aggfunc=np.sum,
)
expected = DataFrame(
np.array(
[
np.nan,
3,
np.nan,
np.nan,
6,
np.nan,
1,
9,
np.nan,
9,
np.nan,
np.nan,
np.nan,
np.nan,
3,
np.nan,
]
).reshape(4, 4),
index=[
datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31),
],
columns=[
datetime(2013, 9, 30),
datetime(2013, 10, 31),
datetime(2013, 11, 30),
datetime(2013, 12, 31),
],
)
expected.index.name = "Date"
expected.columns.name = "PayDay"
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index=Grouper(freq="M", key="PayDay"),
columns=Grouper(freq="M", key="Date"),
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected.T)
tuples = [
(datetime(2013, 9, 30), datetime(2013, 10, 31)),
(datetime(2013, 10, 31), datetime(2013, 9, 30)),
(datetime(2013, 10, 31), datetime(2013, 11, 30)),
(datetime(2013, 10, 31), datetime(2013, 12, 31)),
(datetime(2013, 11, 30), datetime(2013, 10, 31)),
(datetime(2013, 12, 31), datetime(2013, 11, 30)),
]
idx = MultiIndex.from_tuples(tuples, names=["Date", "PayDay"])
expected = DataFrame(
np.array(
[3, np.nan, 6, np.nan, 1, np.nan, 9, np.nan, 9, np.nan, np.nan, 3]
).reshape(6, 2),
index=idx,
columns=["A", "B"],
)
expected.columns.name = "Branch"
result = pivot_table(
df,
index=[Grouper(freq="M", key="Date"), Grouper(freq="M", key="PayDay")],
columns=["Branch"],
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index=["Branch"],
columns=[Grouper(freq="M", key="Date"), Grouper(freq="M", key="PayDay")],
values="Quantity",
aggfunc=np.sum,
)
tm.assert_frame_equal(result, expected.T)
def test_pivot_datetime_tz(self):
dates1 = [
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
]
dates2 = [
"2013-01-01 15:00:00",
"2013-01-01 15:00:00",
"2013-01-01 15:00:00",
"2013-02-01 15:00:00",
"2013-02-01 15:00:00",
"2013-02-01 15:00:00",
]
df = DataFrame(
{
"label": ["a", "a", "a", "b", "b", "b"],
"dt1": dates1,
"dt2": dates2,
"value1": np.arange(6, dtype="int64"),
"value2": [1, 2] * 3,
}
)
df["dt1"] = df["dt1"].apply(lambda d: pd.Timestamp(d, tz="US/Pacific"))
df["dt2"] = df["dt2"].apply(lambda d: pd.Timestamp(d, tz="Asia/Tokyo"))
exp_idx = pd.DatetimeIndex(
["2011-07-19 07:00:00", "2011-07-19 08:00:00", "2011-07-19 09:00:00"],
tz="US/Pacific",
name="dt1",
)
exp_col1 = Index(["value1", "value1"])
exp_col2 = Index(["a", "b"], name="label")
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2])
expected = DataFrame([[0, 3], [1, 4], [2, 5]], index=exp_idx, columns=exp_col)
result = pivot_table(df, index=["dt1"], columns=["label"], values=["value1"])
tm.assert_frame_equal(result, expected)
exp_col1 = Index(["sum", "sum", "sum", "sum", "mean", "mean", "mean", "mean"])
exp_col2 = Index(["value1", "value1", "value2", "value2"] * 2)
exp_col3 = pd.DatetimeIndex(
["2013-01-01 15:00:00", "2013-02-01 15:00:00"] * 4,
tz="Asia/Tokyo",
name="dt2",
)
exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3])
expected = DataFrame(
np.array(
[
[0, 3, 1, 2, 0, 3, 1, 2],
[1, 4, 2, 1, 1, 4, 2, 1],
[2, 5, 1, 2, 2, 5, 1, 2],
],
dtype="int64",
),
index=exp_idx,
columns=exp_col,
)
result = pivot_table(
df,
index=["dt1"],
columns=["dt2"],
values=["value1", "value2"],
aggfunc=[np.sum, np.mean],
)
tm.assert_frame_equal(result, expected)
def test_pivot_dtaccessor(self):
# GH 8103
dates1 = [
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
"2011-07-19 07:00:00",
"2011-07-19 08:00:00",
"2011-07-19 09:00:00",
]
dates2 = [
"2013-01-01 15:00:00",
"2013-01-01 15:00:00",
"2013-01-01 15:00:00",
"2013-02-01 15:00:00",
"2013-02-01 15:00:00",
"2013-02-01 15:00:00",
]
df = DataFrame(
{
"label": ["a", "a", "a", "b", "b", "b"],
"dt1": dates1,
"dt2": dates2,
"value1": np.arange(6, dtype="int64"),
"value2": [1, 2] * 3,
}
)
df["dt1"] = df["dt1"].apply(lambda d: pd.Timestamp(d))
df["dt2"] = df["dt2"].apply(lambda d: pd.Timestamp(d))
result = pivot_table(
df, index="label", columns=df["dt1"].dt.hour, values="value1"
)
exp_idx = Index(["a", "b"], name="label")
expected = DataFrame(
{7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=exp_idx,
columns=Index([7, 8, 9], name="dt1"),
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df, index=df["dt2"].dt.month, columns=df["dt1"].dt.hour, values="value1"
)
expected = DataFrame(
{7: [0, 3], 8: [1, 4], 9: [2, 5]},
index=Index([1, 2], name="dt2"),
columns=Index([7, 8, 9], name="dt1"),
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index=df["dt2"].dt.year.values,
columns=[df["dt1"].dt.hour, df["dt2"].dt.month],
values="value1",
)
exp_col = MultiIndex.from_arrays(
[[7, 7, 8, 8, 9, 9], [1, 2] * 3], names=["dt1", "dt2"]
)
expected = DataFrame(
np.array([[0, 3, 1, 4, 2, 5]], dtype="int64"), index=[2013], columns=exp_col
)
tm.assert_frame_equal(result, expected)
result = pivot_table(
df,
index=np.array(["X", "X", "X", "X", "Y", "Y"]),
columns=[df["dt1"].dt.hour, df["dt2"].dt.month],
values="value1",
)
expected = DataFrame(
np.array(
[[0, 3, 1, np.nan, 2, np.nan], [np.nan, np.nan, np.nan, 4, np.nan, 5]]
),
index=["X", "Y"],
columns=exp_col,
)
tm.assert_frame_equal(result, expected)
def test_daily(self):
rng = date_range("1/1/2000", "12/31/2004", freq="D")
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(
DataFrame(ts), index=ts.index.year, columns=ts.index.dayofyear
)
annual.columns = annual.columns.droplevel(0)
doy = np.asarray(ts.index.dayofyear)
for i in range(1, 367):
subset = ts[doy == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_monthly(self):
rng = date_range("1/1/2000", "12/31/2004", freq="M")
ts = Series(np.random.randn(len(rng)), index=rng)
annual = pivot_table(
pd.DataFrame(ts), index=ts.index.year, columns=ts.index.month
)
annual.columns = annual.columns.droplevel(0)
month = ts.index.month
for i in range(1, 13):
subset = ts[month == i]
subset.index = subset.index.year
result = annual[i].dropna()
tm.assert_series_equal(result, subset, check_names=False)
assert result.name == i
def test_pivot_table_with_iterator_values(self):
# GH 12017
aggs = {"D": "sum", "E": "mean"}
pivot_values_list = pd.pivot_table(
self.data, index=["A"], values=list(aggs.keys()), aggfunc=aggs
)
pivot_values_keys = pd.pivot_table(
self.data, index=["A"], values=aggs.keys(), aggfunc=aggs
)
tm.assert_frame_equal(pivot_values_keys, pivot_values_list)
agg_values_gen = (value for value in aggs.keys())
pivot_values_gen = pd.pivot_table(
self.data, index=["A"], values=agg_values_gen, aggfunc=aggs
)
tm.assert_frame_equal(pivot_values_gen, pivot_values_list)
def test_pivot_table_margins_name_with_aggfunc_list(self):
# GH 13354
margins_name = "Weekly"
costs = pd.DataFrame(
{
"item": ["bacon", "cheese", "bacon", "cheese"],
"cost": [2.5, 4.5, 3.2, 3.3],
"day": ["M", "M", "T", "T"],
}
)
table = costs.pivot_table(
index="item",
columns="day",
margins=True,
margins_name=margins_name,
aggfunc=[np.mean, max],
)
ix = pd.Index(["bacon", "cheese", margins_name], dtype="object", name="item")
tups = [
("mean", "cost", "M"),
("mean", "cost", "T"),
("mean", "cost", margins_name),
("max", "cost", "M"),
("max", "cost", "T"),
("max", "cost", margins_name),
]
cols = pd.MultiIndex.from_tuples(tups, names=[None, None, "day"])
expected = pd.DataFrame(table.values, index=ix, columns=cols)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to ints)")
def test_categorical_margins(self, observed):
# GH 10989
df = pd.DataFrame(
{"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2}
)
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, "All"], name="y")
expected.columns = Index([0, 1, "All"], name="z")
table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
@pytest.mark.xfail(reason="GH#17035 (np.mean of ints is casted back to ints)")
def test_categorical_margins_category(self, observed):
df = pd.DataFrame(
{"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2}
)
expected = pd.DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]])
expected.index = Index([0, 1, "All"], name="y")
expected.columns = Index([0, 1, "All"], name="z")
df.y = df.y.astype("category")
df.z = df.z.astype("category")
table = df.pivot_table("x", "y", "z", dropna=observed, margins=True)
tm.assert_frame_equal(table, expected)
def test_categorical_aggfunc(self, observed):
# GH 9534
df = pd.DataFrame(
{"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]}
)
df["C1"] = df["C1"].astype("category")
result = df.pivot_table(
"V", index="C1", columns="C2", dropna=observed, aggfunc="count"
)
expected_index = pd.CategoricalIndex(
["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1"
)
expected_columns = pd.Index(["a", "b"], name="C2")
expected_data = np.array([[1.0, np.nan], [1.0, np.nan], [np.nan, 2.0]])
expected = pd.DataFrame(
expected_data, index=expected_index, columns=expected_columns
)
tm.assert_frame_equal(result, expected)
def test_categorical_pivot_index_ordering(self, observed):
# GH 8731
df = pd.DataFrame(
{
"Sales": [100, 120, 220],
"Month": ["January", "January", "January"],
"Year": [2013, 2014, 2013],
}
)
months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
df["Month"] = df["Month"].astype("category").cat.set_categories(months)
result = df.pivot_table(
values="Sales",
index="Month",
columns="Year",
dropna=observed,
aggfunc="sum",
)
expected_columns = pd.Int64Index([2013, 2014], name="Year")
expected_index = pd.CategoricalIndex(
["January"], categories=months, ordered=False, name="Month"
)
expected = pd.DataFrame(
[[320, 120]], index=expected_index, columns=expected_columns
)
if not observed:
result = result.dropna().astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_pivot_table_not_series(self):
# GH 4386
# pivot_table always returns a DataFrame
        # when values is not list-like, columns is None,
        # and aggfunc is not an instance of list
df = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"], "col3": [1, 3, 9]})
result = df.pivot_table("col1", index=["col3", "col2"], aggfunc=np.sum)
m = MultiIndex.from_arrays([[1, 3, 9], ["C", "D", "E"]], names=["col3", "col2"])
expected = DataFrame([3, 4, 5], index=m, columns=["col1"])
tm.assert_frame_equal(result, expected)
result = df.pivot_table("col1", index="col3", columns="col2", aggfunc=np.sum)
expected = DataFrame(
[[3, np.NaN, np.NaN], [np.NaN, 4, np.NaN], [np.NaN, np.NaN, 5]],
index=Index([1, 3, 9], name="col3"),
columns=Index(["C", "D", "E"], name="col2"),
)
tm.assert_frame_equal(result, expected)
result = df.pivot_table("col1", index="col3", aggfunc=[np.sum])
m = MultiIndex.from_arrays([["sum"], ["col1"]])
expected = DataFrame([3, 4, 5], index=Index([1, 3, 9], name="col3"), columns=m)
tm.assert_frame_equal(result, expected)
def test_pivot_margins_name_unicode(self):
# issue #13292
greek = "\u0394\u03bf\u03ba\u03b9\u03bc\u03ae"
frame = pd.DataFrame({"foo": [1, 2, 3]})
table = pd.pivot_table(
frame, index=["foo"], aggfunc=len, margins=True, margins_name=greek
)
index = pd.Index([1, 2, 3, greek], dtype="object", name="foo")
expected = pd.DataFrame(index=index)
tm.assert_frame_equal(table, expected)
def test_pivot_string_as_func(self):
# GH #18713
# for correctness purposes
data = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": range(11),
}
)
result = pivot_table(data, index="A", columns="B", aggfunc="sum")
mi = MultiIndex(
levels=[["C"], ["one", "two"]], codes=[[0, 0], [0, 1]], names=[None, "B"]
)
expected = DataFrame(
{("C", "one"): {"bar": 15, "foo": 13}, ("C", "two"): {"bar": 7, "foo": 20}},
columns=mi,
).rename_axis("A")
tm.assert_frame_equal(result, expected)
result = pivot_table(data, index="A", columns="B", aggfunc=["sum", "mean"])
mi = MultiIndex(
levels=[["sum", "mean"], ["C"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
names=[None, None, "B"],
)
expected = DataFrame(
{
("mean", "C", "one"): {"bar": 5.0, "foo": 3.25},
("mean", "C", "two"): {"bar": 7.0, "foo": 6.666666666666667},
("sum", "C", "one"): {"bar": 15, "foo": 13},
("sum", "C", "two"): {"bar": 7, "foo": 20},
},
columns=mi,
).rename_axis("A")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"f, f_numpy",
[
("sum", np.sum),
("mean", np.mean),
("std", np.std),
(["sum", "mean"], [np.sum, np.mean]),
(["sum", "std"], [np.sum, np.std]),
(["std", "mean"], [np.std, np.mean]),
],
)
def test_pivot_string_func_vs_func(self, f, f_numpy):
# GH #18713
# for consistency purposes
result = pivot_table(self.data, index="A", columns="B", aggfunc=f)
expected = pivot_table(self.data, index="A", columns="B", aggfunc=f_numpy)
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_pivot_number_of_levels_larger_than_int32(self):
# GH 20601
df = DataFrame(
{"ind1": np.arange(2 ** 16), "ind2": np.arange(2 ** 16), "count": 0}
)
msg = "Unstacked DataFrame is too big, causing int32 overflow"
with pytest.raises(ValueError, match=msg):
df.pivot_table(
index="ind1", columns="ind2", values="count", aggfunc="count"
)
def test_pivot_table_aggfunc_dropna(self, dropna):
# GH 22159
df = pd.DataFrame(
{
"fruit": ["apple", "peach", "apple"],
"size": [1, 1, 2],
"taste": [7, 6, 6],
}
)
def ret_one(x):
return 1
def ret_sum(x):
return sum(x)
def ret_none(x):
return np.nan
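        # ret_none yields all-NaN columns, which dropna=True is expected to drop from the result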
result = pd.pivot_table(
df, columns="fruit", aggfunc=[ret_sum, ret_none, ret_one], dropna=dropna
)
data = [[3, 1, np.nan, np.nan, 1, 1], [13, 6, np.nan, np.nan, 1, 1]]
col = pd.MultiIndex.from_product(
[["ret_sum", "ret_none", "ret_one"], ["apple", "peach"]],
names=[None, "fruit"],
)
expected = pd.DataFrame(data, index=["size", "taste"], columns=col)
if dropna:
expected = expected.dropna(axis="columns")
tm.assert_frame_equal(result, expected)
def test_pivot_table_aggfunc_scalar_dropna(self, dropna):
# GH 22159
df = pd.DataFrame(
{"A": ["one", "two", "one"], "x": [3, np.nan, 2], "y": [1, np.nan, np.nan]}
)
result = pd.pivot_table(df, columns="A", aggfunc=np.mean, dropna=dropna)
data = [[2.5, np.nan], [1, np.nan]]
col = pd.Index(["one", "two"], name="A")
expected = pd.DataFrame(data, index=["x", "y"], columns=col)
if dropna:
expected = expected.dropna(axis="columns")
tm.assert_frame_equal(result, expected)
class TestCrosstab:
def setup_method(self, method):
df = DataFrame(
{
"A": [
"foo",
"foo",
"foo",
"foo",
"bar",
"bar",
"bar",
"bar",
"foo",
"foo",
"foo",
],
"B": [
"one",
"one",
"one",
"two",
"one",
"one",
"one",
"two",
"two",
"two",
"one",
],
"C": [
"dull",
"dull",
"shiny",
"dull",
"dull",
"shiny",
"shiny",
"dull",
"shiny",
"shiny",
"shiny",
],
"D": np.random.randn(11),
"E": np.random.randn(11),
"F": np.random.randn(11),
}
)
self.df = df.append(df, ignore_index=True)
def test_crosstab_single(self):
df = self.df
result = crosstab(df["A"], df["C"])
expected = df.groupby(["A", "C"]).size().unstack()
tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64))
def test_crosstab_multiple(self):
df = self.df
result = crosstab(df["A"], [df["B"], df["C"]])
expected = df.groupby(["A", "B", "C"]).size()
expected = expected.unstack("B").unstack("C").fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
result = crosstab([df["B"], df["C"]], df["A"])
expected = df.groupby(["B", "C", "A"]).size()
expected = expected.unstack("A").fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_crosstab_ndarray(self):
a = np.random.randint(0, 5, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 10, size=100)
df = DataFrame({"a": a, "b": b, "c": c})
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"))
expected = crosstab(df["a"], [df["b"], df["c"]])
tm.assert_frame_equal(result, expected)
result = crosstab([b, c], a, colnames=["a"], rownames=("b", "c"))
expected = crosstab([df["b"], df["c"]], df["a"])
tm.assert_frame_equal(result, expected)
# assign arbitrary names
result = crosstab(self.df["A"].values, self.df["C"].values)
assert result.index.name == "row_0"
assert result.columns.name == "col_0"
def test_crosstab_non_aligned(self):
# GH 17005
a = pd.Series([0, 1, 1], index=["a", "b", "c"])
b = pd.Series([3, 4, 3, 4, 3], index=["a", "b", "c", "d", "f"])
c = np.array([3, 4, 3])
expected = pd.DataFrame(
[[1, 0], [1, 1]],
index=Index([0, 1], name="row_0"),
columns=Index([3, 4], name="col_0"),
)
result = crosstab(a, b)
tm.assert_frame_equal(result, expected)
result = crosstab(a, c)
tm.assert_frame_equal(result, expected)
def test_crosstab_margins(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({"a": a, "b": b, "c": c})
result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True)
assert result.index.names == ("a",)
assert result.columns.names == ["b", "c"]
all_cols = result["All", ""]
exp_cols = df.groupby(["a"]).size().astype("i8")
# to keep index.name
exp_margin = Series([len(df)], index=Index(["All"], name="a"))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ("All", "")
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc["All"]
exp_rows = df.groupby(["b", "c"]).size().astype("i8")
exp_rows = exp_rows.append(Series([len(df)], index=[("All", "")]))
exp_rows.name = "All"
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
def test_crosstab_margins_set_margin_name(self):
# GH 15972
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
df = DataFrame({"a": a, "b": b, "c": c})
result = crosstab(
a,
[b, c],
rownames=["a"],
colnames=("b", "c"),
margins=True,
margins_name="TOTAL",
)
assert result.index.names == ("a",)
assert result.columns.names == ["b", "c"]
all_cols = result["TOTAL", ""]
exp_cols = df.groupby(["a"]).size().astype("i8")
# to keep index.name
exp_margin = Series([len(df)], index=Index(["TOTAL"], name="a"))
exp_cols = exp_cols.append(exp_margin)
exp_cols.name = ("TOTAL", "")
tm.assert_series_equal(all_cols, exp_cols)
all_rows = result.loc["TOTAL"]
exp_rows = df.groupby(["b", "c"]).size().astype("i8")
exp_rows = exp_rows.append(Series([len(df)], index=[("TOTAL", "")]))
exp_rows.name = "TOTAL"
exp_rows = exp_rows.reindex(all_rows.index)
exp_rows = exp_rows.fillna(0).astype(np.int64)
tm.assert_series_equal(all_rows, exp_rows)
msg = "margins_name argument must be a string"
for margins_name in [666, None, ["a", "b"]]:
with pytest.raises(ValueError, match=msg):
crosstab(
a,
[b, c],
rownames=["a"],
colnames=("b", "c"),
margins=True,
margins_name=margins_name,
)
def test_crosstab_pass_values(self):
a = np.random.randint(0, 7, size=100)
b = np.random.randint(0, 3, size=100)
c = np.random.randint(0, 5, size=100)
values = np.random.randn(100)
table = crosstab(
[a, b], c, values, aggfunc=np.sum, rownames=["foo", "bar"], colnames=["baz"]
)
df = DataFrame({"foo": a, "bar": b, "baz": c, "values": values})
expected = df.pivot_table(
"values", index=["foo", "bar"], columns="baz", aggfunc=np.sum
)
tm.assert_frame_equal(table, expected)
def test_crosstab_dropna(self):
# GH 3820
a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
b = np.array(["one", "one", "two", "one", "two", "two", "two"], dtype=object)
c = np.array(
["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
)
res = pd.crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"], dropna=False)
m = MultiIndex.from_tuples(
[("one", "dull"), ("one", "shiny"), ("two", "dull"), ("two", "shiny")],
names=["b", "c"],
)
tm.assert_index_equal(res.columns, m)
def test_crosstab_no_overlap(self):
        # GH 10291
s1 = pd.Series([1, 2, 3], index=[1, 2, 3])
s2 = pd.Series([4, 5, 6], index=[4, 5, 6])
actual = crosstab(s1, s2)
expected = pd.DataFrame()
tm.assert_frame_equal(actual, expected)
def test_margin_dropna(self):
# GH 12577
        # pivot_table counts null values into the margin ('All')
        # when margins=True and dropna=True
df = pd.DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3, 4, "All"], name="b")
tm.assert_frame_equal(actual, expected)
df = DataFrame(
{"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
)
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3.0, 4.0, "All"], name="b")
tm.assert_frame_equal(actual, expected)
df = DataFrame(
{"a": [1, np.nan, np.nan, np.nan, np.nan, 2], "b": [3, 3, 4, 4, 4, 4]}
)
actual = pd.crosstab(df.a, df.b, margins=True, dropna=True)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3, 4, "All"], name="b")
tm.assert_frame_equal(actual, expected)
# GH 12642
# _add_margins raises KeyError: Level None not found
# when margins=True and dropna=False
df = pd.DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]})
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [1, 3, 4], [2, 4, 6]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3, 4, "All"], name="b")
tm.assert_frame_equal(actual, expected)
df = DataFrame(
{"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]}
)
actual = pd.crosstab(df.a, df.b, margins=True, dropna=False)
expected = pd.DataFrame([[1, 0, 1], [0, 1, 1], [1, 4, 6]])
expected.index = Index([1.0, 2.0, "All"], name="a")
expected.columns = Index([3.0, 4.0, "All"], name="b")
tm.assert_frame_equal(actual, expected)
a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object)
b = np.array(["one", "one", "two", "one", "two", np.nan, "two"], dtype=object)
c = np.array(
["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object
)
actual = pd.crosstab(
a, [b, c], rownames=["a"], colnames=["b", "c"], margins=True, dropna=False
)
m = MultiIndex.from_arrays(
[
["one", "one", "two", "two", "All"],
["dull", "shiny", "dull", "shiny", ""],
],
names=["b", "c"],
)
expected = DataFrame(
[[1, 0, 1, 0, 2], [2, 0, 1, 1, 5], [3, 0, 2, 1, 7]], columns=m
)
expected.index = Index(["bar", "foo", "All"], name="a")
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab(
[a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=False
)
m = MultiIndex.from_arrays(
[["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
names=["a", "b"],
)
expected = DataFrame(
[[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 2, 7]], index=m
)
expected.columns = Index(["dull", "shiny", "All"], name="c")
tm.assert_frame_equal(actual, expected)
actual = pd.crosstab(
[a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=True
)
m = MultiIndex.from_arrays(
[["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]],
names=["a", "b"],
)
expected = DataFrame(
[[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m
)
expected.columns = Index(["dull", "shiny", "All"], name="c")
tm.assert_frame_equal(actual, expected)
def test_crosstab_normalize(self):
# Issue 12578
df = pd.DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
)
rindex = pd.Index([1, 2], name="a")
cindex = pd.Index([3, 4], name="b")
full_normal = pd.DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex)
row_normal = pd.DataFrame(
[[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex
)
col_normal = pd.DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex)
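        # normalize="all" divides by the grand total, "index" by row sums, "columns" by column sums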
# Check all normalize args
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize="all"), full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True), full_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize="index"), row_normal)
tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize="columns"), col_normal)
tm.assert_frame_equal(
pd.crosstab(df.a, df.b, normalize=1),
pd.crosstab(df.a, df.b, normalize="columns"),
)
tm.assert_frame_equal(
pd.crosstab(df.a, df.b, normalize=0),
pd.crosstab(df.a, df.b, normalize="index"),
)
row_normal_margins = pd.DataFrame(
[[1.0, 0], [0.25, 0.75], [0.4, 0.6]],
index=pd.Index([1, 2, "All"], name="a", dtype="object"),
columns=pd.Index([3, 4], name="b", dtype="object"),
)
col_normal_margins = pd.DataFrame(
[[0.5, 0, 0.2], [0.5, 1.0, 0.8]],
index=pd.Index([1, 2], name="a", dtype="object"),
columns=pd.Index([3, 4, "All"], name="b", dtype="object"),
)
all_normal_margins = pd.DataFrame(
[[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]],
index=pd.Index([1, 2, "All"], name="a", dtype="object"),
columns=pd.Index([3, 4, "All"], name="b", dtype="object"),
)
tm.assert_frame_equal(
pd.crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins
)
tm.assert_frame_equal(
pd.crosstab(df.a, df.b, normalize="columns", margins=True),
col_normal_margins,
)
tm.assert_frame_equal(
pd.crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins
)
# Test arrays
pd.crosstab(
[np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2])
)
# Test with aggfunc
norm_counts = pd.DataFrame(
[[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]],
index=pd.Index([1, 2, "All"], name="a", dtype="object"),
columns=pd.Index([3, 4, "All"], name="b"),
)
test_case = pd.crosstab(
df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True
)
tm.assert_frame_equal(test_case, norm_counts)
df = pd.DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]}
)
norm_sum = pd.DataFrame(
[[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]],
index=pd.Index([1, 2, "All"], name="a", dtype="object"),
columns=pd.Index([3, 4, "All"], name="b", dtype="object"),
)
test_case = pd.crosstab(
df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True
)
tm.assert_frame_equal(test_case, norm_sum)
def test_crosstab_with_empties(self):
# Check handling of empties
df = pd.DataFrame(
{
"a": [1, 2, 2, 2, 2],
"b": [3, 3, 4, 4, 4],
"c": [np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
empty = pd.DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
index=pd.Index([1, 2], name="a", dtype="int64"),
columns=pd.Index([3, 4], name="b"),
)
for i in [True, "index", "columns"]:
calculated = pd.crosstab(
df.a, df.b, values=df.c, aggfunc="count", normalize=i
)
tm.assert_frame_equal(empty, calculated)
nans = pd.DataFrame(
[[0.0, np.nan], [0.0, 0.0]],
index=pd.Index([1, 2], name="a", dtype="int64"),
columns=pd.Index([3, 4], name="b"),
)
calculated = pd.crosstab(
df.a, df.b, values=df.c, aggfunc="count", normalize=False
)
tm.assert_frame_equal(nans, calculated)
def test_crosstab_errors(self):
# Issue 12578
df = pd.DataFrame(
{"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]}
)
error = "values cannot be used without an aggfunc."
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, values=df.c)
error = "aggfunc cannot be used without values"
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, aggfunc=np.mean)
error = "Not a valid normalize argument"
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, normalize="42")
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, normalize=42)
error = "Not a valid margins argument"
with pytest.raises(ValueError, match=error):
pd.crosstab(df.a, df.b, normalize="all", margins=42)
    def test_crosstab_with_categorical_columns(self):
# GH 8860
df = pd.DataFrame(
{
"MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"],
"MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"],
}
)
categories = ["Sedan", "Electric", "Pickup"]
df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories)
result = pd.crosstab(df["MAKE"], df["MODEL"])
expected_index = pd.Index(["Acura", "Honda", "Tesla"], name="MAKE")
expected_columns = pd.CategoricalIndex(
categories, categories=categories, ordered=False, name="MODEL"
)
expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]]
expected = pd.DataFrame(
expected_data, index=expected_index, columns=expected_columns
)
tm.assert_frame_equal(result, expected)
def test_crosstab_with_numpy_size(self):
# GH 4003
df = pd.DataFrame(
{
"A": ["one", "one", "two", "three"] * 6,
"B": ["A", "B", "C"] * 8,
"C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4,
"D": np.random.randn(24),
"E": np.random.randn(24),
}
)
result = pd.crosstab(
index=[df["A"], df["B"]],
columns=[df["C"]],
margins=True,
aggfunc=np.size,
values=df["D"],
)
expected_index = pd.MultiIndex(
levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]],
codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]],
names=["A", "B"],
)
expected_column = pd.Index(["bar", "foo", "All"], dtype="object", name="C")
expected_data = np.array(
[
[2.0, 2.0, 4.0],
[2.0, 2.0, 4.0],
[2.0, 2.0, 4.0],
[2.0, np.nan, 2.0],
[np.nan, 2.0, 2.0],
[2.0, np.nan, 2.0],
[np.nan, 2.0, 2.0],
[2.0, np.nan, 2.0],
[np.nan, 2.0, 2.0],
[12.0, 12.0, 24.0],
]
)
expected = pd.DataFrame(
expected_data, index=expected_index, columns=expected_column
)
tm.assert_frame_equal(result, expected)
def test_crosstab_dup_index_names(self):
# GH 13279
s = pd.Series(range(3), name="foo")
result = pd.crosstab(s, s)
expected_index = pd.Index(range(3), name="foo")
expected = pd.DataFrame(
np.eye(3, dtype=np.int64), index=expected_index, columns=expected_index
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]])
def test_crosstab_tuple_name(self, names):
s1 = pd.Series(range(3), name=names[0])
s2 = pd.Series(range(1, 4), name=names[1])
mi = pd.MultiIndex.from_arrays([range(3), range(1, 4)], names=names)
expected = pd.Series(1, index=mi).unstack(1, fill_value=0)
result = pd.crosstab(s1, s2)
tm.assert_frame_equal(result, expected)
def test_crosstab_unsorted_order(self):
df = pd.DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"])
result = pd.crosstab(df.index, [df.b, df.a])
e_idx = pd.Index(["A", "B", "C"], name="row_0")
e_columns = pd.MultiIndex.from_tuples(
[(1, 4), (2, 6), (3, 5)], names=["b", "a"]
)
expected = pd.DataFrame(
[[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns
)
tm.assert_frame_equal(result, expected)
def test_margin_normalize(self):
# GH 27500
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
# normalize on index
result = pd.crosstab(
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0
)
expected = pd.DataFrame(
[[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]]
)
expected.index = MultiIndex(
levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
names=["A", "B"],
)
expected.columns = Index(["large", "small"], dtype="object", name="C")
tm.assert_frame_equal(result, expected)
# normalize on columns
result = pd.crosstab(
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1
)
expected = pd.DataFrame(
[
[0.25, 0.2, 0.222222],
[0.25, 0.2, 0.222222],
[0.5, 0.2, 0.333333],
[0, 0.4, 0.222222],
]
)
expected.columns = Index(
["large", "small", "Sub-Total"], dtype="object", name="C"
)
expected.index = MultiIndex(
levels=[["bar", "foo"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["A", "B"],
)
tm.assert_frame_equal(result, expected)
# normalize on both index and column
result = pd.crosstab(
[df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True
)
expected = pd.DataFrame(
[
[0.111111, 0.111111, 0.222222],
[0.111111, 0.111111, 0.222222],
[0.222222, 0.111111, 0.333333],
[0.000000, 0.222222, 0.222222],
[0.444444, 0.555555, 1],
]
)
expected.columns = Index(
["large", "small", "Sub-Total"], dtype="object", name="C"
)
expected.index = MultiIndex(
levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]],
codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]],
names=["A", "B"],
)
tm.assert_frame_equal(result, expected)
| apache-2.0 | meppe/ros-ort | src/frcnn/scripts/run_detection_demo.py | 1 | 7546 |
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
from lib.datasets.pascal_voc import pascal_voc
from lib.datasets.coco import coco
from lib.datasets.nico import Nico
import random
from lib.fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
import pprint
base_dir = "/opt/ros-ort/src/frcnn/src/"
CLASSES = ()
CONF_THRESH = 0.7
def parse_args():
"""Parse input arguments."""
nets = ["ZF, VGG16"]
datasets = ["nico", "pascal_voc"]
methods = ["faster_rcnn_end2end", "faster_rcnn_alt_opt"]
train_imdbs = ["nico_2017_trainval", "voc_2007_trainval"]
test_dirs = ["nico2017/nico2017", "VOCdevkit2007/VOC2007"]
net = "ZF"
method = "faster_rcnn_end2end"
cfg = 'experiments/cfgs/faster_rcnn_end2end.yml'
# The setting for pascal_voc 2007
# dataset = "pascal_voc"
# train_imdb = "voc_2007_trainval"
# test_dir = "VOCdevkit2007/VOC2007"
# The setting for nico 2017
dataset = "nico"
train_imdb = "nico_2017_trainval"
test_dir = "nico2017/nico2017"
iterations = 700
caffemodel = net.lower() + "_faster_rcnn_iter_" + str(iterations) + ".caffemodel"
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='net', help='Network to use',
choices=nets, default=net)
parser.add_argument('--dataset', dest='dataset',
help='dataset to test with',
default=dataset, type=str, choices=datasets)
parser.add_argument('--method', dest='method',
                        help='training method used for the model',
default=method, type=str, choices=methods)
parser.add_argument('--imdb', dest='train_imdb',
help='dataset to train on',
default=train_imdb, type=str, choices=train_imdbs)
parser.add_argument('--caffemodel', dest='caffemodel',
help='caffemodel file',
default=caffemodel, type=str)
parser.add_argument('--test_dirs', dest='test_dir',
help='directory that contains testing data',
default=test_dir, type=str, choices=test_dirs)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=base_dir + '/' + cfg, type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
global CLASSES
if args.dataset == "nico":
CLASSES = Nico.CLASSES
elif args.dataset == "pascal_voc":
CLASSES = pascal_voc.CLASSES
return args
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, im_file):
"""Detect object classes in an image using pre-computed object proposals."""
global CONF_THRESH
# Load the demo image
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
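        # dets is an (N, 5) array of [x1, y1, x2, y2, score]; NMS keeps only the
        # highest-scoring box among heavily overlapping detections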
keep = nms(dets, cfg.TEST.NMS)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
cfg.TEST.HAS_RPN = True # Use RPN for proposals
prototxt = base_dir + 'models/' + args.dataset + '/' + args.net + '/faster_rcnn_end2end/test.prototxt'
if not os.path.isfile(prototxt):
raise IOError('{:s} not found.'.format(prototxt))
caffemodel = os.path.join(base_dir, "output", args.method, args.train_imdb, args.caffemodel)
if not os.path.isfile(caffemodel):
raise IOError('{:s} not found.'.format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
data_dir = cfg.DATA_DIR
test_data = os.path.join(data_dir, args.test_dir, "ImageSets", "Main", "test.txt")
test_data_file = open(test_data, 'r')
im_names = []
for line in test_data_file:
line = line.replace("\n", "")
if line != '':
im_names.append(line+".jpg")
random.shuffle(im_names)
print("Enter the number of random test images that you want to see:")
num = int(raw_input())
for im_name in im_names[:num]:
im_file = os.path.join(data_dir, args.test_dir, "JPEGImages", im_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for {}'.format(im_file)
demo(net, im_file)
plt.show()
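    # Example invocation (uses the script's defaults; adjust paths and model names to your setup):
    #   ./run_detection_demo.py --net ZF --dataset nico --imdb nico_2017_trainval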
| gpl-3.0 | PatrickOReilly/scikit-learn | sklearn/utils/tests/test_utils.py | 47 | 9089 |
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1],
replace=False, n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
# Issue:6581, n_samples can be more when replace is True (default).
assert_equal(len(resample([1, 2], n_samples=5)), 5)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
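    # the small diagonal shift makes the singular graph Laplacian invertible,
    # so shift-invert mode at sigma=0.0 is well defined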
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
    v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert indexables to numpy arrays
    # with float dtype and lets any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
|
bsd-3-clause
|
toobaz/pandas
|
pandas/core/computation/align.py
|
2
|
5493
|
"""Core eval alignment algorithms
"""
from functools import partial, wraps
import warnings
import numpy as np
from pandas.errors import PerformanceWarning
import pandas as pd
import pandas.core.common as com
from pandas.core.computation.common import _result_type_many
def _align_core_single_unary_op(term):
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
ret = (typ,)
if not hasattr(term.value, "axes"):
ret += (None,)
else:
ret += (_zip_axes_from_type(typ, term.value.axes),)
return ret
def _zip_axes_from_type(typ, new_axes):
axes = {ax_name: new_axes[ax_ind] for ax_ind, ax_name in typ._AXIS_NAMES.items()}
return axes
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return _result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms) if hasattr(term.value, "axes")]
term_dims = [terms[i].value.ndim for i in term_index]
ndims = pd.Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, pd.Series)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how="outer")
for i, ndim in ndims.items():
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, "reindex"):
transpose = isinstance(ti, pd.Series) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = (
"Alignment difference on axis {axis} is larger "
"than an order of magnitude on term {term!r}, by "
"more than {ordm:.4g}; performance may suffer"
).format(axis=axis, term=terms[i].name, ordm=ordm)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if not isinstance(typ, partial) and issubclass(typ, pd.core.generic.PandasObject):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, "type") and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
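if __name__ == "__main__":
    # Hedged demo (not part of pandas; illustrative only): pd.eval exercises
    # the alignment machinery in this module. Frames with different indexes
    # are outer-joined on both axes before the expression is evaluated, so
    # the unmatched row shows up as NaN in the result.
    df1 = pd.DataFrame(np.arange(6.0).reshape(3, 2), columns=["a", "b"])
    df2 = pd.DataFrame(np.arange(8.0).reshape(4, 2), columns=["a", "b"])
    print(pd.eval("df1 + df2"))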
|
bsd-3-clause
|
ssujit/mca
|
setup.py
|
2
|
1618
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirements = [
"scipy", "numpy", "pandas"
]
test_requirements = [
# "numpy", "pandas"
]
setup(
name='mca',
version='1.0.1',
description='Multiple correspondence analysis with pandas',
long_description=readme + '\n\n' + history,
author='Emre Safak',
author_email='[email protected]',
url='https://github.com/esafak/mca',
download_url = 'https://github.com/esafak/mca/tarball/master',
py_modules=['mca'],
package_dir={'': 'src'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords=['mca', 'statistics'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
)
|
bsd-3-clause
|
Midafi/scikit-image
|
skimage/io/tests/test_plugin.py
|
24
|
3393
|
from contextlib import contextmanager
from numpy.testing import assert_equal, raises
from skimage import io
from skimage.io import manage_plugins
io.use_plugin('pil')
priority_plugin = 'pil'
def setup_module():
manage_plugins.use_plugin('test') # see ../_plugins/test_plugin.py
def teardown_module():
io.reset_plugins()
@contextmanager
def protect_preferred_plugins():
"""Contexts where `preferred_plugins` can be modified w/o side-effects."""
preferred_plugins = manage_plugins.preferred_plugins.copy()
try:
yield
finally:
manage_plugins.preferred_plugins = preferred_plugins
def test_read():
io.imread('test.png', as_grey=True, dtype='i4', plugin='test')
def test_save():
io.imsave('test.png', [1, 2, 3], plugin='test')
def test_show():
io.imshow([1, 2, 3], plugin_arg=(1, 2), plugin='test')
def test_collection():
io.imread_collection('*.png', conserve_memory=False, plugin='test')
def test_use():
manage_plugins.use_plugin('test')
manage_plugins.use_plugin('test', 'imshow')
@raises(ValueError)
def test_failed_use():
manage_plugins.use_plugin('asd')
def test_use_priority():
manage_plugins.use_plugin(priority_plugin)
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, priority_plugin)
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
def test_use_priority_with_func():
manage_plugins.use_plugin('pil')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test', 'imread')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'test')
def test_plugin_order():
p = io.plugin_order()
assert 'imread' in p
assert 'test' in p['imread']
def test_available():
assert 'qt' in io.available_plugins
assert 'test' in io.find_available_plugins(loaded=True)
def test_load_preferred_plugins_all():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins = {'all': ['pil'],
'imshow': ['matplotlib']}
manage_plugins.reset_plugins()
for plugin_type in ('imread', 'imsave'):
plug, func = manage_plugins.plugin_store[plugin_type][0]
assert func == getattr(pil_plugin, plugin_type)
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == getattr(matplotlib_plugin, 'imshow')
def test_load_preferred_plugins_imread():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins['imread'] = ['pil']
manage_plugins.reset_plugins()
plug, func = manage_plugins.plugin_store['imread'][0]
assert func == pil_plugin.imread
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == matplotlib_plugin.imshow, func.__module__
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
bsd-3-clause
|
amnet04/ALECMAPREADER1
|
textdetector.py
|
1
|
3864
|
import os
import numpy as np
import pandas
import cv2
import funcionesCV_recurrentes as cvr
from ocr import ocr
def detectar_area_variantes(mapa):
    '''
    Detect the text areas in the upper-left part of the map corresponding to
    the map title, the variants and the translations.
    '''
    img = cvr.cargar_imagen(mapa)
    height, width = img['gris'].shape[:2]
    img_dilatada = cvr.dilatar_imagen(img['gris'], 127, 255, (13, 13), 5)
    img2, contours, hierarchy = cv2.findContours(img_dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas_de_texto = []
for contour in contours:
[x,y,w,h] = cv2.boundingRect(contour)
if (60 < w < width/2) or (60 < h < height/2):
if ( x < width/3 and y < height/7 and w > 16 and h > 16):
areas_de_texto.append((x,y,x+w,y+h))
areas_de_texto = pandas.DataFrame(areas_de_texto, columns=('x1','y1','x2','y2'))
areas_ordenadas = areas_de_texto.sort_values(by=['y1'])
are_tit = areas_ordenadas.iloc[0,:]
are_int = areas_ordenadas.iloc[1:,:]
are_int = are_int.sort_values(by=['x1'])
are_var = are_int.iloc[0,:]
are_tra = are_int.iloc[1,:]
im = {}
im['tit'] = cvr.cortar_imagen(img['gris'],are_tit.x1,are_tit.x2,are_tit.y1,are_tit.y2)
im['var'] = cvr.cortar_imagen(img['gris'],are_var.x1,are_var.x2,are_var.y1,are_var.y2)
im['tra'] = cvr.cortar_imagen(img['gris'],are_tra.x1,are_tra.x2,are_tra.y1,are_tra.y2)
return(im)
def obtener_titulo(mapa):
img = detectar_area_variantes(mapa)['tit']['im']
img_mejorada = cvr.preprocesar_texto_adapta(img, 255,71,30)
text = ocr(img_mejorada,'spa')
titulo = {}
titulo['numero']=text[0].replace(' ','')
titulo['ententrada']=[' '.join(text[1:])][0]
return(titulo)
def obtener_traducciones(mapa):
img = detectar_area_variantes(mapa)['tra']['im']
img_mejorada = cvr.preprocesar_texto_otsu(img,127,255,(3,7),3)
text_eng = ocr(img_mejorada,'eng')
text_fra = ocr(img_mejorada,'fra')
traducciones = {}
traducciones['frances']=text_fra[0]
traducciones['ingles']=text_eng[1]
return(traducciones)
def preparar_sub_carpeta(mapa):
volumen = mapa[5:7]
    carpeta_mapa = 'm' + str(obtener_titulo(mapa)['numero'])
    path = "datos_procesados/" + volumen + "/" + carpeta_mapa
    if not os.path.exists(path):
        os.makedirs(path)
return(path)
def buscar_variantes(mapa):
titulo = obtener_titulo(mapa)
traducciones = obtener_traducciones(mapa)
departamentos = detectar_departamentos(mapa)
variantes = obtener_variantes(mapa)
imagenes_variantes={}
for variante in variantes.iterrows():
variante_nombre = variante[0].replace(' ','_')
x = variante[1].iloc[0]
y = variante[1].iloc[1]
w = variante[1].iloc[2]
h = variante[1].iloc[3]
imagen_variante = img_gray[y:h,x:w]
w_variante,h_variante=imagen_variante.shape[::-1]
cv2.imwrite(carpeta+variante_nombre+'.jpg',imagen_variante)
for key, value in departamentos.items():
x_dep = value[0][0]
y_dep = value[0][1]
w_dep= value[1][0]
h_dep = value[1][1]
imagen_departamento_gray = img_gray[y_dep:h_dep,x_dep:w_dep]
imagen_departamento = img[y_dep:h_dep,x_dep:w_dep]
concidencias = cv2.matchTemplate (imagen_departamento_gray, imagen_variante, cv2.TM_CCOEFF_NORMED)
loc = np.where( concidencias >= 0.9)
            # colored rectangles, just for visual inspection
i=0
for pt in zip(*loc[::-1]):
cv2.rectangle(imagen_departamento, pt, (pt[0] + w_variante, pt[1] + h_variante), (0,0,255), 1)
cv2.putText(imagen_departamento, variante_nombre, pt, cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0,0,255), 1)
i+=1
cv2.imwrite(carpeta+key+'.jpg',imagen_departamento)
|
mit
|
lenovor/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
142
|
5990
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is zero in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
untom/scikit-learn
|
examples/applications/svm_gui.py
|
287
|
11161
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
|
uglyboxer/linear_neuron
|
net-p3/lib/python3.5/site-packages/sklearn/externals/joblib/parallel.py
|
29
|
28665
|
"""
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import os
import sys
import gc
import warnings
from collections import Sized
from math import sqrt
import functools
import time
import threading
import itertools
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
###############################################################################
# CPU that works also when multiprocessing is not installed (python2.5)
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
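# Hedged illustration (comments only, not part of joblib): with verbose=5 the
# filter lets a message through at index 0 and then at indices where
# sqrt(index / 18) crosses an integer, e.g.
#     [i for i in range(200) if not _verbosity_filter(i, 5)]
#     # -> [0, 17, 71, 161]: the gaps grow roughly quadratically.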
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
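# Hedged usage sketch (comments only, illustrative): with the threading
# backend the function never crosses a process boundary, so the up-front
# pickling check can safely be skipped, e.g.
#     Parallel(n_jobs=2, backend='threading')(
#         delayed(len, check_pickle=False)(s) for s in ('ab', 'cde', 'f'))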
###############################################################################
class ImmediateApply(object):
""" A non-delayed apply function.
"""
def __init__(self, func, args, kwargs):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = func(*args, **kwargs)
def get(self):
return self.results
###############################################################################
class CallBack(object):
""" Callback used by parallel: it is used for progress reporting, and
to add data to be processed
"""
def __init__(self, index, parallel):
self.parallel = parallel
self.index = index
def __call__(self, out):
self.parallel.print_progress(self.index)
if self.parallel._original_iterable:
self.parallel.dispatch_next()
class LockedIterator(object):
"""Wrapper to protect a thread-unsafe iterable against concurrent access.
A Python generator is not thread-safe by default and will raise
ValueError("generator already executing") if two threads consume it
concurrently.
In joblib this could typically happen when the passed iterator is a
generator expression and pre_dispatch != 'all'. In that case a callback is
passed to the multiprocessing apply_async call and helper threads will
trigger the consumption of the source iterable in the dispatch_next
method.
"""
def __init__(self, it):
self._lock = threading.Lock()
self._it = iter(it)
def __iter__(self):
return self
def next(self):
with self._lock:
return next(self._it)
# For Python 3 compat
__next__ = next
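# Hedged sketch (comments only, illustrative): a plain generator consumed by
# two threads at once can raise ValueError("generator already executing");
# wrapping it first makes concurrent next() calls safe, e.g.
#     safe_gen = LockedIterator(x * x for x in range(1000))
#     next(safe_gen)  # may now be called from several threads concurrently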
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs : int
The number of jobs to use for the computation. If -1 all CPUs
are used. If 1 is given, no parallel computing code is used
at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend : str or None
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose : int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch : {'all', integer, or expression, as in '3*n_jobs'}
The amount of jobs to be pre-dispatched. Default is 'all',
but it may be memory consuming, for instance if each job
        involves a lot of data.
temp_folder : str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
max_nbytes : int, str, or None, optional, 100e6 (100MB) by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
mmap_mode : 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' enables turning off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend=None, verbose=0, pre_dispatch='all',
temp_folder=None, max_nbytes=100e6, mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self._pool = None
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it.
self._output = None
self._jobs = list()
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
def dispatch(self, func, args, kwargs):
""" Queue the function for computing, with or without multiprocessing
"""
if self._pool is None:
job = ImmediateApply(func, args, kwargs)
index = len(self._jobs)
if not _verbosity_filter(index, self.verbose):
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(time.time() - self._start_time)
))
self._jobs.append(job)
self.n_dispatched += 1
else:
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
try:
self._lock.acquire()
job = self._pool.apply_async(SafeFunction(func), args,
kwargs, callback=CallBack(self.n_dispatched, self))
self._jobs.append(job)
self.n_dispatched += 1
except AssertionError:
print('[Parallel] Pool seems closed')
finally:
self._lock.release()
def dispatch_next(self):
""" Dispatch more data for parallel processing
"""
self._dispatch_amount += 1
while self._dispatch_amount:
try:
# XXX: possible race condition shuffling the order of
# dispatches in the next two lines.
func, args, kwargs = next(self._original_iterable)
self.dispatch(func, args, kwargs)
self._dispatch_amount -= 1
except ValueError:
""" Race condition in accessing a generator, we skip,
the dispatch will be done later.
"""
except StopIteration:
self._iterating = False
self._original_iterable = None
return
def _print(self, msg, msg_args):
""" Display the message on stout or stderr depending on verbosity
"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self, index):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print only 'verbose' times a message
# The challenge is that we may not know the queue length
if self._original_iterable:
if _verbosity_filter(index, self.verbose):
return
self._print('Done %3i jobs | elapsed: %s',
(index + 1,
short_format_time(elapsed_time),
))
else:
# We are finished dispatching
queue_length = self.n_dispatched
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (queue_length - index + 1
- self._pre_dispatch_amount)
frequency = (queue_length // self.verbose) + 1
is_last_item = (index + 1 == queue_length)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
queue_length,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job queue can be filling up as
# we empty it
if hasattr(self, '_lock'):
self._lock.acquire()
job = self._jobs.pop(0)
if hasattr(self, '_lock'):
self._lock.release()
try:
self._output.append(job.get())
except tuple(self.exceptions) as exception:
try:
self._aborting = True
self._lock.acquire()
if isinstance(exception,
(KeyboardInterrupt, WorkerInterrupt)):
# We have captured a user interruption, clean up
# everything
if hasattr(self, '_pool'):
self._pool.close()
self._pool.terminate()
# We can now allow subprocesses again
os.environ.pop('__JOBLIB_SPAWNED_PARALLEL__', 0)
raise exception
elif isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (
this_report,
exception.message,
)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
raise exception_type(report)
raise exception
finally:
self._lock.release()
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
if n_jobs < 0 and mp is not None:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
self._lock = threading.Lock()
# Whether or not to set an environment flag to track
# multiple process spawning
set_environ_flag = False
if (n_jobs is None or mp is None or n_jobs == 1):
n_jobs = 1
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=2)
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
n_jobs = 1
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=2)
else:
already_forked = int(os.environ.get('__JOBLIB_SPAWNED_PARALLEL__', 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Make sure to free as much memory as possible before forking
gc.collect()
# Set an environment variable to avoid infinite loops
set_environ_flag = True
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
pre_dispatch = self.pre_dispatch
if isinstance(iterable, Sized):
# We are given a sized (an object with len). No need to be lazy.
pre_dispatch = 'all'
if pre_dispatch == 'all' or n_jobs == 1:
self._original_iterable = None
self._pre_dispatch_amount = 0
else:
# The dispatch mechanism relies on multiprocessing helper threads
# to dispatch tasks from the original iterable concurrently upon
# job completions. As Python generators are not thread-safe we
# need to wrap it with a lock
iterable = LockedIterator(iterable)
self._original_iterable = iterable
self._dispatch_amount = 0
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions
iterable = itertools.islice(iterable, pre_dispatch)
self._start_time = time.time()
self.n_dispatched = 0
try:
if set_environ_flag:
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
self._iterating = True
for function, args, kwargs in iterable:
self.dispatch(function, args, kwargs)
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above for loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output),
len(self._output),
short_format_time(elapsed_time)
))
finally:
if n_jobs > 1:
self._pool.close()
self._pool.terminate() # terminate does a join()
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
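if __name__ == '__main__':
    # Hedged demo (not part of joblib; illustrative only): compute square
    # roots in two worker processes with a progress meter when this module
    # is run directly.
    from math import sqrt
    print(Parallel(n_jobs=2, verbose=5)(
        delayed(sqrt)(i ** 2) for i in range(10)))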
|
mit
|
jpautom/scikit-learn
|
sklearn/neural_network/rbm.py
|
46
|
12303
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
        visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
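# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It assumes the class defined above is exposed as ``BernoulliRBM``
# (as its docstrings suggest) and shows fitting it on toy binary data, then
# inspecting the hidden representation and one Gibbs step.  The helper is not
# called anywhere.
def _demo_bernoulli_rbm():
    rng = np.random.RandomState(0)
    X_demo = (rng.rand(30, 8) > 0.5).astype(np.float64)  # toy binary data
    rbm = BernoulliRBM(n_components=4, learning_rate=0.05, batch_size=10,
                       n_iter=5, random_state=0)
    rbm.fit(X_demo)
    hidden = rbm.transform(X_demo)   # P(h=1|v), shape (30, 4)
    resampled = rbm.gibbs(X_demo)    # one Gibbs step, shape (30, 8)
    return hidden, resampled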
|
bsd-3-clause
|
GeyerA/android_external_chromium_org
|
ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py
|
154
|
8545
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
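# Hedged illustration (added; not part of the original test script):
# ReadDumpTxtFile turns a Breakpad .txt metadata file containing lines like
# 'plat:Win32' into a dict such as {'plat': 'Win32'}.  The helper below shows
# the same parsing on an in-memory list of lines; the values are made up and
# the function is not called anywhere.
def DemoParseDumpLines():
  example_lines = ['plat:Win32\n', 'ptype:nacl-loader\n', 'rept:crash svc\n']
  dump_info = {}
  for line in example_lines:
    if ':' in line:
      key, value = line.rstrip().split(':', 1)
      dump_info[key] = value
  return dump_info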
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
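# Hedged sketch (added; not part of the original harness): MainWrapper above
# relies on a register-then-finally pattern so that temporary directories and
# crash_service processes are torn down even if Main() raises.  The toy
# function below shows that pattern in isolation; it is not called anywhere.
def DemoCleanupPattern():
  messages = []
  cleanup_funcs = []
  cleanup_funcs.append(lambda: messages.append('cleanup ran'))
  try:
    messages.append('work ran')
  finally:
    for func in cleanup_funcs:
      func()
  return messages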
if __name__ == '__main__':
sys.exit(MainWrapper())
|
bsd-3-clause
|
djajetic/AutoML2
|
lib/models.py
|
3
|
10478
|
import numpy as np
import scipy as sp
from sklearn.linear_model import Ridge, RidgeClassifier, LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, BaggingClassifier, BaggingRegressor, RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import operator
import copy
class MyAutoML:
    ''' Rough sketch of a class that "solves" the AutoML problem. We illustrate how the various types of data encountered in the challenge can be handled.
    We also make sure that the model regularly outputs predictions on validation and test data, so that, if the execution of the program is interrupted (timeout),
    there are still results provided by the program. The baseline methods chosen are not optimized and do not provide particularly good results.
    In particular, no special effort was put into dealing with missing values and categorical variables.
    The constructor selects a model based on the data information passed as argument. This is a form of model selection "filter".
    We anticipate that the participants may compute a wider range of statistics to perform filter model selection.
    We also anticipate that the participants will conduct cross-validation experiments to further select among various models
    and hyper-parameters of the model. They might walk through "model space" systematically (e.g. with grid search), heuristically (e.g. with greedy strategies),
    or stochastically (random walks). This example does not bother doing that. We simply use a growing ensemble of models to improve predictions over time.
    We use ensemble methods that vote on an increasing number of classifiers. For efficiency, we use WARM START to re-use
    already trained base predictors, when available.
    IMPORTANT: This is just a "toy" example:
    - it was checked only on the phase 0 data at the time of release
    - not all cases are considered
    - this could easily break on datasets from further phases
    - this is very inefficient (most ensembles have no "warm start" option, hence we do a lot of unnecessary calculations)
    - there is no preprocessing
    '''
def __init__(self, info, verbose=True, debug_mode=False):
self.label_num=info['label_num']
self.target_num=info['target_num']
self.task = info['task']
self.metric = info['metric']
self.postprocessor = None
#self.postprocessor = MultiLabelEnsemble(LogisticRegression(), balance=True) # To calibrate proba
self.postprocessor = MultiLabelEnsemble(LogisticRegression(), balance=False) # To calibrate proba
if debug_mode>=2:
self.name = "RandomPredictor"
self.model = RandomPredictor(self.target_num)
self.predict_method = self.model.predict_proba
return
if info['task']=='regression':
if info['is_sparse']==True:
self.name = "BaggingRidgeRegressor"
self.model = BaggingRegressor(base_estimator=Ridge(), n_estimators=1, verbose=verbose) # unfortunately, no warm start...
else:
self.name = "GradientBoostingRegressor"
self.model = GradientBoostingRegressor(n_estimators=1, max_depth=4, min_samples_split=14, verbose=verbose, warm_start = True)
self.predict_method = self.model.predict # Always predict probabilities
else:
            if info['has_categorical']: # Out of laziness, we do not convert categorical variables...
self.name = "RandomForestClassifier"
self.model = RandomForestClassifier(n_estimators=1, verbose=verbose) # unfortunately, no warm start...
elif info['is_sparse']:
self.name = "BaggingNBClassifier"
self.model = BaggingClassifier(base_estimator=BernoulliNB(), n_estimators=1, verbose=verbose) # unfortunately, no warm start...
else:
self.name = "GradientBoostingClassifier"
self.model = eval(self.name + "(n_estimators=1, verbose=" + str(verbose) + ", random_state=1, warm_start = True)")
if info['task']=='multilabel.classification':
self.model = MultiLabelEnsemble(self.model)
self.predict_method = self.model.predict_proba
def __repr__(self):
return "MyAutoML : " + self.name
def __str__(self):
return "MyAutoML : \n" + str(self.model)
def fit(self, X, Y):
self.model.fit(X,Y)
# Train a calibration model postprocessor
if self.task != 'regression' and self.postprocessor!=None:
Yhat = self.predict_method(X)
if len(Yhat.shape)==1: # IG modif Feb3 2015
Yhat = np.reshape(Yhat,(-1,1))
self.postprocessor.fit(Yhat, Y)
return self
def predict(self, X):
prediction = self.predict_method(X)
# Calibrate proba
if self.task != 'regression' and self.postprocessor!=None:
prediction = self.postprocessor.predict_proba(prediction)
        # Keep only the 2nd column (the positive class) since the two columns sum to 1
if self.target_num==1 and len(prediction.shape)>1 and prediction.shape[1]>1:
prediction = prediction[:,1]
# Make sure the normalization is correct
if self.task=='multiclass.classification':
eps = 1e-15
norma = np.sum(prediction, axis=1)
for k in range(prediction.shape[0]):
prediction[k,:] /= sp.maximum(norma[k], eps)
return prediction
class MultiLabelEnsemble:
''' MultiLabelEnsemble(predictorInstance, balance=False)
Like OneVsRestClassifier: Wrapping class to train multiple models when
several objectives are given as target values. Its predictor may be an ensemble.
This class can be used to create a one-vs-rest classifier from multiple 0/1 labels
to treat a multi-label problem or to create a one-vs-rest classifier from
a categorical target variable.
Arguments:
predictorInstance -- A predictor instance is passed as argument (be careful, you must instantiate
the predictor class before passing the argument, i.e. end with (),
        e.g. LogisticRegression()).
balance -- True/False. If True, attempts to re-balance classes in training data
by including a random sample (without replacement) s.t. the largest class has at most 2 times
the number of elements of the smallest one.
Example Usage: mymodel = MultiLabelEnsemble (GradientBoostingClassifier(), True)'''
def __init__(self, predictorInstance, balance=False):
self.predictors = [predictorInstance]
self.n_label = 1
self.n_target = 1
self.n_estimators = 1 # for predictors that are ensembles of estimators
self.balance=balance
def __repr__(self):
return "MultiLabelEnsemble"
def __str__(self):
return "MultiLabelEnsemble : \n" + "\tn_label={}\n".format(self.n_label) + "\tn_target={}\n".format(self.n_target) + "\tn_estimators={}\n".format(self.n_estimators) + str(self.predictors[0])
def fit(self, X, Y):
if len(Y.shape)==1:
Y = np.array([Y]).transpose() # Transform vector into column matrix
# This is NOT what we want: Y = Y.reshape( -1, 1 ), because Y.shape[1] out of range
self.n_target = Y.shape[1] # Num target values = num col of Y
self.n_label = len(set(Y.ravel())) # Num labels = num classes (categories of categorical var if n_target=1 or n_target if labels are binary )
# Create the right number of copies of the predictor instance
if len(self.predictors)!=self.n_target:
predictorInstance = self.predictors[0]
self.predictors = [predictorInstance]
for i in range(1,self.n_target):
self.predictors.append(copy.copy(predictorInstance))
# Fit all predictors
for i in range(self.n_target):
            # Update the number of desired predictors
if hasattr(self.predictors[i], 'n_estimators'):
self.predictors[i].n_estimators=self.n_estimators
# Subsample if desired
if self.balance:
pos = Y[:,i]>0
neg = Y[:,i]<=0
if sum(pos)<sum(neg):
chosen = pos
not_chosen = neg
else:
chosen = neg
not_chosen = pos
num = sum(chosen)
idx=filter(lambda(x): x[1]==True, enumerate(not_chosen))
idx=np.array(zip(*idx)[0])
np.random.shuffle(idx)
chosen[idx[0:min(num, len(idx))]]=True
# Train with chosen samples
self.predictors[i].fit(X[chosen,:],Y[chosen,i])
else:
self.predictors[i].fit(X,Y[:,i])
return
def predict_proba(self, X):
if len(X.shape)==1: # IG modif Feb3 2015
X = np.reshape(X,(-1,1))
prediction = self.predictors[0].predict_proba(X)
if self.n_label==2: # Keep only 1 prediction, 1st column = (1 - 2nd column)
prediction = prediction[:,1]
for i in range(1,self.n_target): # More than 1 target, we assume that labels are binary
new_prediction = self.predictors[i].predict_proba(X)[:,1]
prediction = np.column_stack((prediction, new_prediction))
return prediction
class RandomPredictor:
''' Make random predictions.'''
def __init__(self, target_num):
self.target_num=target_num
return
def __repr__(self):
return "RandomPredictor"
def __str__(self):
return "RandomPredictor"
def fit(self, X, Y):
if len(Y.shape)>1:
assert(self.target_num==Y.shape[1])
return self
def predict_proba(self, X):
prediction = np.random.rand(X.shape[0],self.target_num)
return prediction
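# Hedged usage sketch (added for illustration; not part of the original
# library).  It shows how MyAutoML can be driven with a hand-built ``info``
# dictionary containing the keys read by the constructor above; the shapes,
# metric name and task are made up for the example, and the helper is never
# called.
def _demo_myautoml():
    info = {'label_num': 2, 'target_num': 1, 'task': 'binary.classification',
            'metric': 'auc_metric', 'is_sparse': False,
            'has_categorical': False}
    X = np.random.rand(60, 5)
    Y = (np.random.rand(60) > 0.5).astype(int)
    model = MyAutoML(info, verbose=False)
    model.fit(X, Y)
    return model.predict(X)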
|
mit
|
hikhvar/Stats
|
client/view.py
|
1
|
14378
|
#!/usr/bin/env python2
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 on Sat Nov 1 13:19:14 2014
#
import wx
from controller import Controller
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
from customWidgets import EventCheckListCtrl
from customWidgets import SatzCheckListCtrl
from customWidgets import PlotPanel
from customWidgets import SchuetzenListCtrl
# end wxGlade
class MainFrame(wx.Frame):
def __init__(self, controller, *args, **kwds):
# begin wxGlade: MainFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE | wx.ICONIZE | wx.MINIMIZE
wx.Frame.__init__(self, *args, **kwds)
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
wxglade_tmp_menu = wx.Menu()
self.DateChangeAbleMenu = wx.MenuItem(wxglade_tmp_menu, wx.ID_ANY, _(u"Datum frei w\u00e4hlbar"), "", wx.ITEM_CHECK)
wxglade_tmp_menu.AppendItem(self.DateChangeAbleMenu)
self.DeleteEnableMenu = wx.MenuItem(wxglade_tmp_menu, wx.ID_ANY, _(u"L\u00f6schen erlauben"), "", wx.ITEM_CHECK)
wxglade_tmp_menu.AppendItem(self.DeleteEnableMenu)
self.frame_1_menubar.Append(wxglade_tmp_menu, _("Optionen"))
self.SetMenuBar(self.frame_1_menubar)
# Menu Bar end
self.notebook_1 = wx.Notebook(self, wx.ID_ANY)
self.notebook_1_pane_Eingabe = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_5 = wx.StaticText(self.notebook_1_pane_Eingabe, wx.ID_ANY, _(u"Sch\u00fctze"))
self.combo_box_schuetze = wx.ComboBox(self.notebook_1_pane_Eingabe, wx.ID_ANY, choices=[], style=wx.CB_READONLY | wx.CB_SORT)
self.label_ringe = wx.StaticText(self.notebook_1_pane_Eingabe, wx.ID_ANY, _("Ringe"))
self.text_ctrl_satz = wx.TextCtrl(self.notebook_1_pane_Eingabe, wx.ID_ANY, "", style=wx.TE_PROCESS_ENTER)
self.label_datum = wx.StaticText(self.notebook_1_pane_Eingabe, wx.ID_ANY, _("Datum"))
self.datepicker_ctrl_eintrag = wx.DatePickerCtrl(self.notebook_1_pane_Eingabe, wx.ID_ANY)
self.panel_5 = wx.Panel(self.notebook_1_pane_Eingabe, wx.ID_ANY)
self.button_eintragen = wx.Button(self.notebook_1_pane_Eingabe, wx.ID_ANY, _("Eintragen"))
self.panel_9 = wx.Panel(self.notebook_1_pane_Eingabe, wx.ID_ANY)
self.text_ctrl_log = wx.TextCtrl(self.notebook_1_pane_Eingabe, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_READONLY)
self.notebook_1_pane_pro_termin = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_4 = wx.StaticText(self.notebook_1_pane_pro_termin, wx.ID_ANY, _("Termin"))
self.combo_box_stats_pro_termin = wx.ComboBox(self.notebook_1_pane_pro_termin, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.SchuetzenListCtrl = SchuetzenListCtrl(self.notebook_1_pane_pro_termin, [["Noname", "-","-","-","-", "----"]])
self.notebook_1_pane_grafik = wx.Panel(self.notebook_1, wx.ID_ANY)
self.label_1 = wx.StaticText(self.notebook_1_pane_grafik, wx.ID_ANY, _("Plot"), style=wx.ALIGN_CENTER)
self.combo_box_plot_mode = wx.ComboBox(self.notebook_1_pane_grafik, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.label_2 = wx.StaticText(self.notebook_1_pane_grafik, wx.ID_ANY, _("Start"), style=wx.ST_NO_AUTORESIZE)
self.combo_box_plot_start = wx.ComboBox(self.notebook_1_pane_grafik, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.label_3 = wx.StaticText(self.notebook_1_pane_grafik, wx.ID_ANY, _("Ende"))
self.combo_box_plot_end = wx.ComboBox(self.notebook_1_pane_grafik, wx.ID_ANY, choices=[], style=wx.CB_DROPDOWN)
self.sizer_4_staticbox = wx.StaticBox(self.notebook_1_pane_grafik, wx.ID_ANY, "")
self.panel_matplotlib = PlotPanel(self.notebook_1_pane_grafik, wx.ID_ANY)
self.notebook_1_pane_verwaltung = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_1 = wx.Panel(self.notebook_1_pane_verwaltung, wx.ID_ANY, style=wx.BORDER_RAISED | wx.TAB_TRAVERSAL)
self.label_6 = wx.StaticText(self.panel_1, wx.ID_ANY, _("Vorname"), style=wx.ALIGN_CENTER)
self.text_ctrl_new_name = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
self.label_7 = wx.StaticText(self.panel_1, wx.ID_ANY, _("Nachname"))
self.text_ctrl_new_surname = wx.TextCtrl(self.panel_1, wx.ID_ANY, "")
self.panel_3 = wx.Panel(self.panel_1, wx.ID_ANY)
self.button_neu_erstellen = wx.Button(self.panel_1, wx.ID_ANY, _("Erstellen"))
self.panel_2 = wx.Panel(self.notebook_1_pane_verwaltung, wx.ID_ANY, style=wx.BORDER_RAISED | wx.TAB_TRAVERSAL)
self.label_8 = wx.StaticText(self.panel_2, wx.ID_ANY, _("Name"))
self.choice_delete = wx.Choice(self.panel_2, wx.ID_ANY, choices=[])
self.panel_4 = wx.Panel(self.panel_2, wx.ID_ANY)
self.button_delete = wx.Button(self.panel_2, wx.ID_ANY, _(u"L\u00f6schen"))
self.SatzDeleteListCtrl = SatzCheckListCtrl(self.notebook_1_pane_verwaltung, wx.ID_ANY)
self.satz_delete = wx.Button(self.notebook_1_pane_verwaltung, wx.ID_ANY, _(u"S\u00e4tze L\u00f6schen"))
self.notebook_1_pane_Ereignisse = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_6 = wx.Panel(self.notebook_1_pane_Ereignisse, wx.ID_ANY, style=wx.BORDER_RAISED | wx.TAB_TRAVERSAL)
self.label_10 = wx.StaticText(self.panel_6, wx.ID_ANY, _("Beschreibung"))
self.text_ctrl_event_description = wx.TextCtrl(self.panel_6, wx.ID_ANY, "")
self.label_11 = wx.StaticText(self.panel_6, wx.ID_ANY, _("Datum"))
self.datepicker_ctrl_event = wx.DatePickerCtrl(self.panel_6, wx.ID_ANY)
self.panel_7 = wx.Panel(self.panel_6, wx.ID_ANY)
self.button_event_add = wx.Button(self.panel_6, wx.ID_ANY, _("Eintragen"))
self.panel_8 = wx.Panel(self.notebook_1_pane_Ereignisse, wx.ID_ANY, style=wx.BORDER_RAISED | wx.TAB_TRAVERSAL)
self.EventDeleteCtrl = EventCheckListCtrl(self.panel_8, wx.ID_ANY)
self.button_event_delete = wx.Button(self.panel_8, wx.ID_ANY, _(u"Ereignisse l\u00f6schen"))
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_MENU, self.OnDateEditableChange, self.DateChangeAbleMenu)
self.Bind(wx.EVT_MENU, self.OnDeleteEnable, self.DeleteEnableMenu)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEintrag, self.text_ctrl_satz)
self.Bind(wx.EVT_BUTTON, self.OnEintrag, self.button_eintragen)
self.Bind(wx.EVT_COMBOBOX, self.OnChangeStatsTermin, self.combo_box_stats_pro_termin)
self.Bind(wx.EVT_COMBOBOX, self.OnPlotModeChange, self.combo_box_plot_mode)
self.Bind(wx.EVT_COMBOBOX, self.OnPlotModeChange, self.combo_box_plot_start)
self.Bind(wx.EVT_COMBOBOX, self.OnPlotModeChange, self.combo_box_plot_end)
self.Bind(wx.EVT_BUTTON, self.OnCreate, self.button_neu_erstellen)
self.Bind(wx.EVT_BUTTON, self.OnDelete, self.button_delete)
self.Bind(wx.EVT_BUTTON, self.OnSatzDelete, self.satz_delete)
self.Bind(wx.EVT_BUTTON, self.OnEventEntry, self.button_event_add)
self.Bind(wx.EVT_BUTTON, self.OnEventDelete, self.button_event_delete)
# end wxGlade
self.controller = controller
self.controller.set_mainframe(self)
def __set_properties(self):
# begin wxGlade: MainFrame.__set_properties
self.SetTitle(_("Schuss in'n Ofen Statistiken"))
self.SetSize((978, 656))
self.text_ctrl_log.SetMinSize((250, 400))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MainFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
sizer_9 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_5 = wx.FlexGridSizer(3, 2, 0, 0)
sizer_6 = wx.BoxSizer(wx.VERTICAL)
sizer_7 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_1 = wx.FlexGridSizer(1, 2, 0, 100)
grid_sizer_4 = wx.FlexGridSizer(2, 2, 0, 0)
grid_sizer_2 = wx.FlexGridSizer(4, 2, 0, 0)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
self.sizer_4_staticbox.Lower()
sizer_4 = wx.StaticBoxSizer(self.sizer_4_staticbox, wx.HORIZONTAL)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
grid_sizer_3 = wx.FlexGridSizer(5, 2, 0, 0)
grid_sizer_3.Add(self.label_5, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
grid_sizer_3.Add(self.combo_box_schuetze, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_3.Add(self.label_ringe, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
grid_sizer_3.Add(self.text_ctrl_satz, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_3.Add(self.label_datum, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
grid_sizer_3.Add(self.datepicker_ctrl_eintrag, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_3.Add(self.panel_5, 1, wx.EXPAND, 0)
grid_sizer_3.Add(self.button_eintragen, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_3.Add(self.panel_9, 1, wx.EXPAND, 0)
grid_sizer_3.Add(self.text_ctrl_log, 0, wx.TOP, 10)
self.notebook_1_pane_Eingabe.SetSizer(grid_sizer_3)
grid_sizer_3.AddGrowableRow(4)
sizer_5.Add(self.label_4, 0, wx.ALIGN_CENTER | wx.ALL, 5)
sizer_5.Add(self.combo_box_stats_pro_termin, 0, 0, 0)
sizer_3.Add(sizer_5, 0, wx.EXPAND, 0)
sizer_3.Add(self.SchuetzenListCtrl, 1, wx.EXPAND, 0)
self.notebook_1_pane_pro_termin.SetSizer(sizer_3)
sizer_4.Add(self.label_1, 0, wx.ALIGN_CENTER | wx.ALL, 5)
sizer_4.Add(self.combo_box_plot_mode, 0, wx.ALIGN_CENTER | wx.ALL, 0)
sizer_4.Add(self.label_2, 0, wx.ALIGN_CENTER | wx.ALL, 5)
sizer_4.Add(self.combo_box_plot_start, 0, wx.ALIGN_CENTER | wx.ALL, 0)
sizer_4.Add(self.label_3, 0, wx.ALIGN_CENTER | wx.ALL, 5)
sizer_4.Add(self.combo_box_plot_end, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
sizer_2.Add(sizer_4, 0, wx.EXPAND, 0)
sizer_2.Add(self.panel_matplotlib, 1, wx.EXPAND, 0)
self.notebook_1_pane_grafik.SetSizer(sizer_2)
grid_sizer_2.Add(self.label_6, 0, wx.ALIGN_CENTER | wx.ALL, 5)
grid_sizer_2.Add(self.text_ctrl_new_name, 0, wx.EXPAND, 0)
grid_sizer_2.Add(self.label_7, 0, wx.ALIGN_CENTER | wx.ALL, 5)
grid_sizer_2.Add(self.text_ctrl_new_surname, 0, wx.EXPAND, 0)
grid_sizer_2.Add(self.panel_3, 1, wx.EXPAND, 0)
grid_sizer_2.Add(self.button_neu_erstellen, 0, 0, 0)
self.panel_1.SetSizer(grid_sizer_2)
grid_sizer_1.Add(self.panel_1, 1, wx.EXPAND, 0)
grid_sizer_4.Add(self.label_8, 0, wx.ALIGN_CENTER | wx.ALL, 5)
grid_sizer_4.Add(self.choice_delete, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_4.Add(self.panel_4, 1, wx.EXPAND, 0)
grid_sizer_4.Add(self.button_delete, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
self.panel_2.SetSizer(grid_sizer_4)
grid_sizer_1.Add(self.panel_2, 1, wx.EXPAND, 0)
sizer_6.Add(grid_sizer_1, 0, wx.EXPAND, 0)
sizer_7.Add(self.SatzDeleteListCtrl, 1, wx.EXPAND, 0)
sizer_7.Add(self.satz_delete, 0, 0, 0)
sizer_6.Add(sizer_7, 1, wx.EXPAND, 0)
self.notebook_1_pane_verwaltung.SetSizer(sizer_6)
grid_sizer_5.Add(self.label_10, 0, wx.ALIGN_CENTER | wx.ALL, 5)
grid_sizer_5.Add(self.text_ctrl_event_description, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_5.Add(self.label_11, 0, wx.ALL, 5)
grid_sizer_5.Add(self.datepicker_ctrl_event, 0, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 0)
grid_sizer_5.Add(self.panel_7, 1, wx.EXPAND, 0)
grid_sizer_5.Add(self.button_event_add, 0, 0, 0)
self.panel_6.SetSizer(grid_sizer_5)
sizer_8.Add(self.panel_6, 1, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
sizer_9.Add(self.EventDeleteCtrl, 1, wx.ALL | wx.EXPAND, 5)
sizer_9.Add(self.button_event_delete, 0, wx.ALL, 4)
self.panel_8.SetSizer(sizer_9)
sizer_8.Add(self.panel_8, 3, wx.ALIGN_CENTER | wx.ALL | wx.EXPAND, 5)
self.notebook_1_pane_Ereignisse.SetSizer(sizer_8)
self.notebook_1.AddPage(self.notebook_1_pane_Eingabe, _("Eingabe"))
self.notebook_1.AddPage(self.notebook_1_pane_pro_termin, _("Statistiken"))
self.notebook_1.AddPage(self.notebook_1_pane_grafik, _("Grafiken"))
self.notebook_1.AddPage(self.notebook_1_pane_verwaltung, _("Verwaltung"))
self.notebook_1.AddPage(self.notebook_1_pane_Ereignisse, _("Ereignisse"))
sizer_1.Add(self.notebook_1, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def OnChangeSchuetze(self, event): # wxGlade: MainFrame.<event_handler>
print "Event handler 'OnChangeSchuetze' not implemented!"
event.Skip()
def OnEintrag(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnEintrag(event)
def OnChangeStatsTermin(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnChangeStatsTermin(event)
def OnPlotModeChange(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnPlotModeChange(event)
def OnCreate(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnCreate(event)
def OnDelete(self, event): # wxGlade: MainFrame.<event_handler>
print "Function not implemented. TODO look how to get value of choice."
self.controller.OnDelete(event)
def OnDateEditableChange(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnDateEditableChange(event)
def OnSatzDelete(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnSatzDelete(event)
def OnDeleteEnable(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnDeleteEnable(event)
def OnEventEntry(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnEventEntry(event)
def OnEventDelete(self, event): # wxGlade: MainFrame.<event_handler>
self.controller.OnEventDelete(event)
# end of class MainFrame
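# Hedged sketch (added; not part of the generated wxGlade code): the frame
# above follows a simple view/controller split -- each wx event handler just
# forwards the event to the injected controller, which registered itself via
# controller.set_mainframe(self).  The minimal classes below show the same
# wiring without any wxGlade layout; the names are made up and nothing here
# is instantiated.
class _DemoController(object):
    def __init__(self):
        self.received = []
    def set_mainframe(self, frame):
        self.frame = frame
    def OnEintrag(self, event):
        self.received.append(event)
class _DemoView(object):
    def __init__(self, controller):
        self.controller = controller
        self.controller.set_mainframe(self)
    def OnEintrag(self, event):
        self.controller.OnEintrag(event)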
if __name__ == "__main__":
gettext.install("app") # replace with the appropriate catalog name
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = MainFrame(Controller(), None, wx.ID_ANY, "")
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
|
mit
|
joshloyal/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
24
|
14430
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
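# Hedged illustration (added for clarity; not part of the original tests):
# the matrix built above is 9x9 and block diagonal -- documents 0-2 only use
# words 0-2, documents 3-5 only words 3-5, and so on -- which is why the
# expected top-word groups in the tests below are (0, 1, 2), (3, 4, 5) and
# (6, 7, 8).  The helper just materialises the dense matrix for inspection
# and is neither collected nor called by the test runner.
def _demo_sparse_mtx_structure():
    n_topics, X = _build_sparse_mtx()
    return n_topics, X.toarray()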
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative values
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples',
lda._perplexity_precomp_distr, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics',
lda._perplexity_precomp_distr, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_doc_topic_distr_deprecation():
# Test that the appropriate warning message is displayed when a user
# attempts to pass the doc_topic_distr argument to the perplexity method
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr1 = lda.fit_transform(X)
distr2 = None
assert_warns(DeprecationWarning, lda.perplexity, X, distr1)
assert_warns(DeprecationWarning, lda.perplexity, X, distr2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
|
bsd-3-clause
|
DaveL17/matplotlib
|
matplotlib.indigoPlugin/Contents/Server Plugin/chart_weather_composite.py
|
1
|
15690
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Creates a composite weather chart
The composite weather chart is a dynamic chart that allows users to add or
remove weather charts at will. For example, the user could create one
chart that contains subplots for high temperature, wind, and precipitation.
Using the chart configuration dialog, the user would be able to add or
remove elements and the chart would adjust accordingly (additional subplots
will be added or removed as needed.)
-----
"""
# Built-in Modules
import datetime as dt
import numpy as np
import pickle
import sys
import traceback
# Third-party Modules
# Note the order and structure of matplotlib imports is intentional.
import matplotlib
matplotlib.use('AGG') # Note: this statement must be run before any other matplotlib imports are done.
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
import matplotlib.ticker as mtick
import matplotlib.patches as patches
# My modules
import chart_tools
log = chart_tools.log
payload = chart_tools.payload
p_dict = payload['p_dict']
k_dict = payload['k_dict']
state_list = payload['state_list']
dev_type = payload['dev_type']
props = payload['props']
chart_name = props['name']
plug_dict = payload['prefs']
dates_to_plot = ()
dpi = int(plt.rcParams['savefig.dpi'])
forecast_length = {'Daily': 8, 'Hourly': 24, 'wundergroundTenDay': 10, 'wundergroundHourly': 24}
height = int(props['height'])
humidity = ()
precipitation = ()
pressure = ()
temperature_high = ()
temperature_low = ()
width = int(props['width'])
wind_bearing = ()
wind_speed = ()
log['Threaddebug'].append(u"chart_weather_composite.py called.")
if plug_dict['verboseLogging']:
chart_tools.log['Threaddebug'].append(u"{0}".format(payload))
try:
def __init__():
pass
def format_subplot(s_plot, title="Title"):
"""Note that we have to set these for each subplot as it's rendered or else
the settings will only be applied to the last subplot rendered."""
s_plot.set_title(title, **k_dict['k_title_font']) # The subplot title
chart_tools.format_axis_x_ticks(ax=s_plot, p_dict=p_dict, k_dict=k_dict, logger=log)
chart_tools.format_axis_y(ax=s_plot, p_dict=p_dict, k_dict=k_dict, logger=log)
# =================================== Grids ===================================
        if p_dict['showxAxisGrid']:
            s_plot.xaxis.grid(True, **k_dict['k_grid_fig'])
        if p_dict['showyAxisGrid']:
            s_plot.yaxis.grid(True, **k_dict['k_grid_fig'])
# ================================ Tick Labels ================================
if props['customSizeFont']:
s_plot.tick_params(axis='both', labelsize=int(props['customTickFontSize']))
else:
s_plot.tick_params(axis='both', labelsize=int(plug_dict['tickFontSize']))
def transparent_chart_fill(s):
if p_dict['transparent_filled']:
s.add_patch(patches.Rectangle((0, 0), 1, 1,
transform=s.transAxes,
facecolor=p_dict['faceColor'],
zorder=1))
ax = chart_tools.make_chart_figure(width=p_dict['chart_width'], height=p_dict['chart_height'], p_dict=p_dict)
# ================================ Set Up Axes ================================
axes = props['component_list']
num_axes = len(axes)
# ============================ X Axis Observations ============================
# Daily
if dev_type in ('Daily', 'wundergroundTenDay'):
for _ in range(1, forecast_length[dev_type] + 1):
dates_to_plot += (state_list[u'd0{d}_date'.format(d=_)],)
humidity += (state_list[u'd0{h}_humidity'.format(h=_)],)
pressure += (state_list[u'd0{p}_pressure'.format(p=_)],)
temperature_high += (state_list[u'd0{th}_temperatureHigh'.format(th=_)],)
temperature_low += (state_list[u'd0{tl}_temperatureLow'.format(tl=_)],)
wind_speed += (state_list[u'd0{ws}_windSpeed'.format(ws=_)],)
wind_bearing += (state_list[u'd0{wb}_windBearing'.format(wb=_)],)
try:
precipitation += (state_list[u'd0{pt}_precipTotal'.format(pt=_)],)
except KeyError:
precipitation += (state_list[u'd0{pr}_pop'.format(pr=_)],)
x1 = [dt.datetime.strptime(_, '%Y-%m-%d') for _ in dates_to_plot]
x_offset = dt.timedelta(hours=6)
# Hourly
else:
for _ in range(1, forecast_length[dev_type] + 1):
if _ <= 9:
_ = '0{dx}'.format(dx=_)
dates_to_plot += (state_list[u'h{e}_epoch'.format(e=_)],)
humidity += (state_list[u'h{h}_humidity'.format(h=_)],)
pressure += (state_list[u'h{p}_pressure'.format(p=_)],)
temperature_high += (state_list[u'h{th}_temperature'.format(th=_)],)
temperature_low += (state_list[u'h{tl}_temperature'.format(tl=_)],)
wind_speed += (state_list[u'h{ws}_windSpeed'.format(ws=_)],)
wind_bearing += (state_list[u'h{wb}_windBearing'.format(wb=_)],)
try:
precipitation += (state_list[u'h{pi}_precipIntensity'.format(pi=_)],)
except KeyError:
precipitation += (state_list[u'h{pr}_precip'.format(pr=_)],)
x1 = [dt.datetime.fromtimestamp(_) for _ in dates_to_plot]
x_offset = dt.timedelta(hours=1)
# ================================ Set Up Plot ================================
fig, subplot = plt.subplots(nrows=num_axes, sharex=True, figsize=(width / dpi, height * num_axes / dpi))
chart_tools.format_title(p_dict=p_dict, k_dict=k_dict, loc=(0.5, 0.99))
try:
for plot in subplot:
plot.set_axis_bgcolor(p_dict['backgroundColor'])
[plot.spines[spine].set_color(p_dict['spineColor']) for spine in ('top', 'bottom', 'left', 'right')]
except IndexError:
subplot.set_axis_bgcolor(p_dict['backgroundColor'])
[subplot.spines[spine].set_color(p_dict['spineColor']) for spine in ('top', 'bottom', 'left', 'right')]
# ============================= Temperature High ==============================
if 'show_high_temperature' in axes:
subplot[0].plot(x1, temperature_high, color=p_dict['lineColor']) # Plot it
format_subplot(subplot[0], title="high temperature") # Format the subplot
transparent_chart_fill(subplot[0])
if p_dict['temperature_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['temperature_min']))
if p_dict['temperature_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['temperature_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0) # Delete the subplot for the next plot
# ============================== Temperature Low ==============================
if 'show_low_temperature' in axes:
subplot[0].plot(x1, temperature_low, color=p_dict['lineColor'])
format_subplot(subplot[0], title='low temperature')
transparent_chart_fill(subplot[0])
if p_dict['temperature_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['temperature_min']))
if p_dict['temperature_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['temperature_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0)
# =========================== Temperature High/Low ============================
if 'show_high_low_temperature' in axes:
subplot[0].plot(x1, temperature_high, color=p_dict['lineColor'])
subplot[0].plot(x1, temperature_low, color=p_dict['lineColor'])
format_subplot(subplot[0], title='high/low temperature')
transparent_chart_fill(subplot[0])
if p_dict['temperature_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['temperature_min']))
if p_dict['temperature_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['temperature_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0)
# ================================= Humidity ==================================
if 'show_humidity' in axes:
subplot[0].plot(x1, humidity, color=p_dict['lineColor'])
format_subplot(subplot[0], title='humidity')
transparent_chart_fill(subplot[0])
if p_dict['humidity_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['humidity_min']))
if p_dict['humidity_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['humidity_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0)
# ============================ Barometric Pressure ============================
if 'show_barometric_pressure' in axes:
subplot[0].plot(x1, pressure, color=p_dict['lineColor'])
format_subplot(subplot[0], title='barometric pressure')
transparent_chart_fill(subplot[0])
if p_dict['pressure_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['pressure_min']))
if p_dict['pressure_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['pressure_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0)
# ========================== Wind Speed and Bearing ===========================
if 'show_wind' in axes:
data = zip(x1, wind_speed, wind_bearing)
subplot[0].plot(x1, wind_speed, color=p_dict['lineColor'])
subplot[0].set_ylim(0, max(wind_speed) + 1)
transparent_chart_fill(subplot[0])
for _ in data:
day = mdate.date2num(_[0])
location = _[1]
# Points to where the wind is going to.
subplot[0].text(day,
location,
" . ",
size=5,
va="center",
ha="center",
rotation=(_[2] * -1) + 90,
color=p_dict['lineMarkerColor'],
bbox=dict(boxstyle="larrow, pad=0.3",
fc=p_dict['lineMarkerColor'],
ec="none",
alpha=0.75
)
)
subplot[0].set_xlim(min(x1) - x_offset, max(x1) + x_offset)
my_fmt = mdate.DateFormatter(props['xAxisLabelFormat'])
subplot[0].xaxis.set_major_formatter(my_fmt)
subplot[0].set_xticks(x1)
format_subplot(subplot[0], title='wind')
if p_dict['wind_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['wind_min']))
if p_dict['wind_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['wind_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0)
# ============================ Precipitation Line =============================
# Precip intensity is in inches of liquid rain per hour. using a line chart.
if 'show_precipitation' in axes:
subplot[0].plot(x1, precipitation, color=p_dict['lineColor'])
format_subplot(subplot[0], title='total precipitation')
transparent_chart_fill(subplot[0])
# Force precip to 2 decimals regardless of device setting.
subplot[0].yaxis.set_major_formatter(mtick.FormatStrFormatter(u"%.2f"))
if p_dict['precipitation_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['precipitation_min']))
if p_dict['precipitation_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['precipitation_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
subplot = np.delete(subplot, 0)
# ============================= Precipitation Bar =============================
# Precip intensity is in inches of liquid rain per hour using a bar chart.
if 'show_precipitation_bar' in axes:
subplot[0].bar(x1, precipitation, width=0.4, align='center', color=p_dict['lineColor'])
format_subplot(subplot[0], title='total precipitation')
transparent_chart_fill(subplot[0])
# Force precip to 2 decimals regardless of device setting.
subplot[0].yaxis.set_major_formatter(mtick.FormatStrFormatter(u"%.2f"))
if p_dict['precipitation_min'] not in ("", "None"):
subplot[0].set_ylim(bottom=float(p_dict['precipitation_min']))
if p_dict['precipitation_max'] not in ("", "None"):
subplot[0].set_ylim(top=float(p_dict['precipitation_max']))
# We apparently have to set this on a plot by plot basis or only the last
# plot is set.
labels = subplot[0].get_xticklabels() + subplot[0].get_yticklabels()
[label.set_fontname(p_dict['fontMain']) for label in labels]
# We don't use the subplot variable after this; but this command
# will be important if we add more subplots.
subplot = np.delete(subplot, 0)
top_space = 1 - (50.0 / (height * num_axes))
bottom_space = 40.0 / (height * num_axes)
# Note that subplots_adjust affects the space surrounding the subplots and
# not the fig.
plt.subplots_adjust(top=0.90,
bottom=0.20,
left=0.10,
right=0.90,
hspace=None,
wspace=None
)
chart_tools.save(logger=log)
except (KeyError, IndexError, ValueError, UnicodeEncodeError) as sub_error:
tb = traceback.format_exc()
chart_tools.log['Critical'].append(u"[{n}] {s}".format(n=chart_name, s=tb))
pickle.dump(chart_tools.log, sys.stdout)
|
mit
|
deepchem/deepchem
|
contrib/pubchem_dataset/create_smiles_mapping.py
|
6
|
2008
|
import pandas as pd
import os
from rdkit import Chem
import time
import gzip
import pickle
import deepchem
def main():
print("Processing PubChem FTP Download")
data_dir = deepchem.utils.get_data_dir()
sdf_dir = os.path.join(data_dir, "SDF")
compound_read_count = 0
keys = list()
values = list()
overall_start = time.time()
all_paths = list()
for path, dirs, filenames in os.walk(sdf_dir):
for filename in filenames:
# RDKit consistently hangs when trying to read this file
if "102125001_102150000" in filename:
continue
file_path = os.path.join(sdf_dir, filename)
all_paths.append(file_path)
all_paths.sort()
for filepath in all_paths:
print("Processing: {0}".format(filepath))
start = time.time()
with gzip.open(filepath, 'rb') as myfile:
suppl = Chem.ForwardSDMolSupplier(myfile)
for mol in suppl:
if mol is None: continue
cid = mol.GetProp("PUBCHEM_COMPOUND_CID")
try:
smiles = Chem.MolToSmiles(mol)
keys.append(int(cid))
values.append(smiles)
except Exception:
continue
end = time.time()
print("Processed file, processed thru compound number: {0} in {1} seconds".
format(compound_read_count, end - start))
compound_read_count = compound_read_count + 1
overall_end = time.time()
secs_elapsed = overall_end - overall_start
print("Parsed all smiles in: {0} seconds, or {1} minutes, or {2} hours".
format(secs_elapsed, secs_elapsed / 60, secs_elapsed / 3600))
print("Total length of: {}".format(len(keys)))
  # No leading slash in the file name; os.path.join would otherwise discard data_dir.
  with open(os.path.join(data_dir, "pubchemsmiles_tuple.pickle"), "wb") as f:
pickle.dump((keys, values), f)
print("Done")
overall_end = time.time()
secs_elapsed = overall_end - overall_start
print("Sorted and saved smiles in: {0} seconds, or {1} minutes, or {2} hours".
format(secs_elapsed, secs_elapsed / 60, secs_elapsed / 3600))
if __name__ == '__main__':
main()
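# Rough sketch of how the saved mapping could be consumed later (hypothetical
# usage, not executed by this script):
#     with open(os.path.join(deepchem.utils.get_data_dir(),
#                            "pubchemsmiles_tuple.pickle"), "rb") as f:
#         cids, smiles = pickle.load(f)
#     cid_to_smiles = dict(zip(cids, smiles))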
|
mit
|
spennihana/h2o-3
|
h2o-py/tests/testdir_algos/kmeans/pyunit_prostateKmeans.py
|
8
|
1259
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.kmeans import H2OKMeansEstimator
import numpy as np
from sklearn.cluster import KMeans
def prostateKmeans():
# Connect to a pre-existing cluster
# connect to localhost:54321
#Log.info("Importing prostate.csv data...\n")
prostate_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
#prostate.summary()
prostate_sci = np.loadtxt(pyunit_utils.locate("smalldata/logreg/prostate_train.csv"), delimiter=',', skiprows=1)
prostate_sci = prostate_sci[:,1:]
for i in range(5,9):
#Log.info(paste("H2O K-Means with ", i, " clusters:\n", sep = ""))
#Log.info(paste( "Using these columns: ", colnames(prostate.hex)[-1]) )
prostate_km_h2o = H2OKMeansEstimator(k=i)
prostate_km_h2o.train(x=list(range(1,prostate_h2o.ncol)), training_frame=prostate_h2o)
prostate_km_h2o.show()
prostate_km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
prostate_km_sci.fit(prostate_sci)
print(prostate_km_sci.cluster_centers_)
if __name__ == "__main__":
pyunit_utils.standalone_test(prostateKmeans)
else:
prostateKmeans()
|
apache-2.0
|
moutai/scikit-learn
|
sklearn/linear_model/sag.py
|
29
|
11291
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the maximum squared sum of X over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.
tol: double, optional
        The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
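# Rough warm-starting sketch (hypothetical values, not executed here): the
# returned warm_start_mem can be passed back in to continue from the previous
# solution, e.g.
#     coef, n_iter, mem = sag_solver(X, y, loss='log', alpha=1., max_iter=10)
#     coef, n_iter, mem = sag_solver(X, y, loss='log', alpha=1., max_iter=10,
#                                    warm_start_mem=mem)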
|
bsd-3-clause
|
eteq/bokeh
|
examples/plotting/file/burtin.py
|
43
|
4765
|
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_file
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
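# Sanity check of the radial mapping (follows from the a and b definitions
# above): rad(0.001) = a * minr + b = outer_radius and rad(1000) = a * maxr + b
# = inner_radius, so the smallest MIC values land on the outer ring and the
# largest on the inner ring.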
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_file("burtin.html", title="burtin.py example")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and labels
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
|
bsd-3-clause
|
mihaelacr/pydeeplearn
|
code/old-version/MNISTdigits.py
|
3
|
9937
|
""" This module is manily created to test the deep belief and
rbm implementations on MNIST"""
__author__ = "Mihaela Rosca"
__contact__ = "[email protected]"
import argparse
import matplotlib.pyplot as plt
import numpy as np
import cPickle as pickle
import readmnist
import restrictedBoltzmannMachine as rbm
import deepbelief as db
import utils
import PCA
import glob
import DimensionalityReduction
from common import *
parser = argparse.ArgumentParser(description='RBM for digit recognition')
parser.add_argument('--save',dest='save',action='store_true', default=False,
help="if true, the network is serialized and saved")
parser.add_argument('--train',dest='train',action='store_true', default=False,
help=("if true, the network is trained from scratch from the"
"traning data"))
parser.add_argument('--pca', dest='pca',action='store_true', default=False,
help=("if true, the code for running PCA on the data is run"))
parser.add_argument('--rbm', dest='rbm',action='store_true', default=False,
help=("if true, the code for traning an rbm on the data is run"))
parser.add_argument('--rbmPCD', dest='rbmPCD',action='store_true', default=False,
help=("if true, the code for traning an rbm on the data is run"))
parser.add_argument('--db', dest='db',action='store_true', default=False,
help=("if true, the code for traning a deepbelief net on the"
"data is run"))
parser.add_argument('--trainSize', type=int, default=10000,
                    help='the number of training cases to be considered')
parser.add_argument('--testSize', type=int, default=1000,
help='the number of testing cases to be considered')
parser.add_argument('netFile', help="file where the serialized network should be saved")
parser.add_argument('--path',dest='path', default="MNIST", help="the path to the MNIST files")
# Get the arguments of the program
args = parser.parse_args()
def visualizeWeights(weights, imgShape, tileShape):
return utils.tile_raster_images(weights, imgShape,
tileShape, tile_spacing=(1, 1))
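# Illustrative call (matching the usage further below): visualizeWeights(
# net.weights.T, (28, 28), (10, 10)) tiles a 10x10 grid of hidden-unit weight
# vectors, each reshaped to a 28x28 image, into a single array for plotting.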
def rbmMain(reconstructRandom=True):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testingVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None, bTrain=False, path=args.path)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testingVectors / 255.0
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
    # The data are the pixel values of the images and have to be normalized
    # before being presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
# use 1 dropout to test the rbm for now
net = rbm.RBM(nrVisible, nrHidden, rbm.contrastiveDivergence, 1, 1)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
# Reconstruct an image and see that it actually looks like a digit
test = testingScaledVectors[0,:]
# get a random image and see it looks like
if reconstructRandom:
test = np.random.random_sample(test.shape)
# Show the initial image first
plt.imshow(vectorToImage(test, (28,28)), cmap=plt.cm.gray)
plt.show()
# Show the reconstruction
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(recon, (28,28)), cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('1.png', transparent=True)
# plt.show()
# Show the weights and their form in a tile fashion
# Plot the weights
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weights.png', transparent=True)
print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(t, f)
pickle.dump(net, f)
def rbmMainPCD():
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testingVectors, testLabels =\
readmnist.read(0, args.testSize, digits=None,bTrain=False, path=args.path)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testingVectors / 255.0
# Train the network
if args.train:
# The number of hidden units is taken from a deep learning tutorial
    # The data are the pixel values of the images and have to be normalized
    # before being presented to the network
nrVisible = len(trainingScaledVectors[0])
nrHidden = 500
# use 1 dropout to test the rbm for now
# net = rbm.RBM(nrVisible, nrHidden, rbm.contrastiveDivergence, 1, 1)
net = rbm.RBM(nrVisible, nrHidden, rbm.PCD, 1, 1)
net.train(trainingScaledVectors)
t = visualizeWeights(net.weights.T, (28,28), (10,10))
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
t = pickle.load(f)
net = pickle.load(f)
f.close()
# Reconstruct a training image and see that it actually looks like a digit
test = testingScaledVectors[0,:]
plt.imshow(vectorToImage(test, (28,28)), cmap=plt.cm.gray)
plt.show()
recon = net.reconstruct(test.reshape(1, test.shape[0]))
plt.imshow(vectorToImage(recon, (28,28)), cmap=plt.cm.gray)
plt.show()
# Show the weights and their form in a tile fashion
plt.imshow(t, cmap=plt.cm.gray)
plt.axis('off')
plt.savefig('weightsPCDall.png', transparent=True)
print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(t, f)
pickle.dump(net, f)
def shuffle(data, labels):
indexShuffle = np.random.permutation(len(data))
shuffledData = np.array([data[i] for i in indexShuffle])
shuffledLabels = np.array([labels[i] for i in indexShuffle])
return shuffledData, shuffledLabels
def pcaOnMnist(training, dimension=700):
principalComponents = PCA.pca(training, dimension)
low, same = PCA.reduce(principalComponents, training)
image2DInitial = vectorToImage(training[0], (28,28))
print same[0].shape
image2D = vectorToImage(same[0], (28,28))
plt.imshow(image2DInitial, cmap=plt.cm.gray)
plt.show()
plt.imshow(image2D, cmap=plt.cm.gray)
plt.show()
print "done"
def deepbeliefMNIST():
training = args.trainSize
testing = args.testSize
trainVectors, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print trainVectors[0].shape
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainingScaledVectors = trainVectors / 255.0
testingScaledVectors = testVectors / 255.0
vectorLabels = labelsToVectors(trainLabels, 10)
if args.train:
# net = db.DBN(3, [784, 500, 10], [Sigmoid(), Softmax()])
# net = db.DBN(4, [784, 500, 500, 10], [Sigmoid, Sigmoid, Softmax])
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
[Sigmoid, Sigmoid, Sigmoid, Softmax],
dropout=0.5, rbmDropout=0.5, visibleDropout=0.8,
rbmVisibleDropout=1)
# TODO: think about what the network should do for 2 layers
net.train(trainingScaledVectors, vectorLabels)
else:
# Take the saved network and use that for reconstructions
f = open(args.netFile, "rb")
net = pickle.load(f)
f.close()
probs, predicted = net.classify(testingScaledVectors)
correct = 0
for i in xrange(testing):
print "predicted"
print "probs"
print probs[i]
print predicted[i]
print "actual"
actual = testLabels[i]
print actual
correct += (predicted[i] == actual)
print "correct"
print correct
# for w in net.weights:
# print w
# for b in net.biases:
# print b
# t = visualizeWeights(net.weights[0].T, trainImages[0].(28, 28), (10,10))
# plt.imshow(t, cmap=plt.cm.gray)
# plt.show()
# print "done"
if args.save:
f = open(args.netFile, "wb")
pickle.dump(net, f)
f.close()
"""
Arguments:
big: should the big or small images be used?
folds: which folds should be used (1,..5) (a list). If None is passed all
folds are used
"""
def deepBeliefKanade(big=False, folds=None):
if big:
files = glob.glob('kanade_150*.pickle')
else:
files = glob.glob('kanade_f*.pickle')
if not folds:
folds = range(1, 6)
# Read the data from them. Sort out the files that do not have
# the folds that we want
# TODO: do this better (with regex in the file name)
  # Do not rely on the order returned
  # Rough fix (assumes the fold number appears in the file name):
  files = [f for f in files if any(str(fold) in f for fold in folds)]
data = []
labels = []
for filename in files:
with open(filename, "rb") as f:
# Sort out the labels from the data
dataAndLabels = pickle.load(f)
foldData = dataAndLabels[0:-1 ,:]
foldLabels = dataAndLabels[-1,:]
data.append(foldData)
labels.append(foldLabels)
# Do LDA
# Create the network
# Test
# You can also group the emotions into positive and negative to see
# if you can get better results (probably yes)
pass
# TODO: fix this (look at the ML coursework for it)
# Even better, use LDA
# think of normalizing them to 0.1 for pca as well
def pcaMain():
training = args.trainSize
testing = args.testSize
train, trainLabels =\
readmnist.read(0, training, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(0, testing, bTrain=False, path=args.path)
print train[0].shape
pcaOnMnist(train, dimension=100)
def main():
if args.db + args.pca + args.rbm + args.rbmPCD != 1:
raise Exception("You decide on one main method to run")
if args.db:
deepbeliefMNIST()
if args.pca:
pcaMain()
if args.rbmPCD:
rbmMainPCD()
if args.rbm:
rbmMain()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
roshantha9/AbstractManycoreSim
|
src/libApplicationModel/HEVCFrameTask.py
|
1
|
83715
|
import pprint
import sys, os
import random
import time
import math
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import OrderedDict
from scipy.stats import rv_discrete
from scipy.stats import exponweib
import scipy.stats as ss
import itertools
## local imports
from Task import Task
from SimParams import SimParams
from MPEG2FrameTask import MPEG2FrameTask
from AdaptiveGoPGenerator import AdaptiveGoPGenerator
# pregen data files
from libApplicationModel.DataPreloader import DataPreloader
import libApplicationModel.HEVCWorkloadParams as HEVCWLP
from util_scripts.generate_hevc_frame_culevel import getCTUsForVideo, varyProbabilities_NormalDist
class HEVCFrameTask(MPEG2FrameTask):
def __init__(self, env, id,
frame_h= SimParams.FRAME_DEFAULT_H, \
frame_w = SimParams.FRAME_DEFAULT_W, \
frame_rate = SimParams.FRAME_RATE, \
frame_type = "I", \
task_granularity = "frame", \
frame_ix_in_gop = 0, \
unique_gop_id = 0, \
gop_id = 0, \
gop_struct = SimParams.GOP_STRUCTURE, \
video_stream_id = None, \
wf_id = None, \
priority = None,
# additional for hevc
video_genre = None,
gop_decode_frame_order = None, # (fix-order, ftype-order)
gop_decode_order_ix = None, # the frame_ix according to decode order
num_ctu_per_slice = None,
num_slices_per_frame=None,
num_tiles_per_frame=None,
interleaved_slice_types=None,
adaptiveGoP_Obj = None,
load_data_from_file = False,
hevc_cc = None,
construct_partitions = True,
tile_parent_frame_task_id = None,
precalc_fr_sizes=None,
enable_workload_validation = False
):
MPEG2FrameTask.__init__(self, env, id,
frame_h, frame_w, frame_rate, frame_type, frame_ix_in_gop,
unique_gop_id, gop_id, gop_struct,
video_stream_id, wf_id,
priority,
calc_deps=False,
calc_cc=False,
calc_pri=False)
self.type = "HEVCFrame-"+self.frame_type
self.task_granularity = task_granularity # frame || tile || slice || block
# decoding order
self.gop_frame_decoding_order = gop_decode_frame_order
self.gop_decode_order_ix = gop_decode_order_ix
## block level specs
# we assume that all blocks in a slice are the same type
self.block_level_info = {} # a structure containing number of blocks per different block type
self.video_genre = video_genre
# frame level priority (Ip>Pp>Bp)
self.frame_priority = self.calc_FramePriorityInsideGOP(self.frame_ix_in_gop, adaptiveGoP_Obj, gopseq=self.gop_structure)
## deadline calculation
dl = (1.0/self.frame_rate)
self.set_deadline(dl) # this will be adjusted later
self.set_timeLeftTillDeadline(dl)
self.end_to_end_deadline = (float(len(self.gop_structure))/float(self.frame_rate))
# total memory required per uncompressed frame
max_mem = ((self.frame_h * self.frame_w) * 24)/8 # 24 bit, rgb, assume I_size = P_size = B_size (in bytes) )
self.set_maxMemConsumption(max_mem)
self.absDeadline = None
self.completedTaskSize = max_mem
# worst case execution times of different types of frames - stored for later use
self.wccIFrame = None
self.wccPFrame = None
self.wccBFrame = None
self.tile_observed_wccc = None
# avg case execution times of different types of frames - stored for later use
self.avgccIFrame = None
self.avgccPFrame = None
self.avgccBFrame = None
# verification
self.verify_num_ctus_generated = 0
# analytical wcet/wcrt
self.analytical_wcet = None
self.analytical_wcrt_with_deps = None
# task size
self.mpeg_tasksize = self.calc_encoded_mpegframe_size()
#print self.mpeg_tasksize
# HEVC interleaved slice types
self.interleaved_slice_types=interleaved_slice_types
# HEVC - number of ctu blocks per slice
self.num_ctu_per_slice = num_ctu_per_slice
# HEVC frames can be split into tiles
self.number_of_tiles = num_tiles_per_frame
self.tile_parent_frame_task_id = tile_parent_frame_task_id
self.hevc_tile_id = None
# Heirarchical B groups
self.bpyramid_ix = None
self.bpyramid_primary_task = None
# HEVC slices
self.num_slices_per_frame = num_slices_per_frame
## dependency related info ##
# populated later
self.frame_dependencies = None
self.set_dependencies(None) # dependent tasks
self.which_frames_needs_me = None
self.which_tasks_needs_me = None
self.my_closest_children = None
self.my_closest_parent = None
self.non_dep_frames = None
self.possible_interfering_frame = None
# items in this dict will get removed as the deps come in
self.outstanding_deps_parent_tids = {}
# how much decoded data do I need from my parent tasks
# this gets populated when calculating the PU level deps,
self.expected_data_from_parents = {}
self.expected_data_from_parents_slicelevel = {}
# how much decoded data do I need to send to my child tasks
self.expected_data_to_children = {}
self.expected_data_to_children_slicelevel = {}
## tracking for workload validation purposes (these are CU level stats)
self.enable_workload_validation = enable_workload_validation
if enable_workload_validation == True:
self.trackvalidate_prop_cu_sizes = {64:0, 32:0, 16:0, 8:0, 4:0}
self.trackvalidate_prop_cu_types = {"ICU":0, "PCU":0, "BCU":0, "SkipCU":0}
self.trackvalidate_cu_dectime = {"ICU_cc":[], "PCU_cc":[], "BCU_cc":[], "SkipCU_cc":[]}
self.trackvalidate_reffrdata = {} # k=parent_id_type, v=amount of data
# related to dependencies
# HEVC adaptive gop obj
self.adaptiveGoP_Obj = adaptiveGoP_Obj
if (self.adaptiveGoP_Obj !=None):
self.populate_FrameDependencies(self.adaptiveGoP_Obj, frame_ix_in_gop)
# HEVC slice/block/tile partitions
self.frame_block_partitions=None
self.frame_tile_partitions=None
if (load_data_from_file == True):
loaded_data_obj = self.load_frame_data(wf_id,
video_stream_id,
unique_gop_id,
frame_ix_in_gop,
SimParams.HEVC_FRAME_GENRAND_SEED)
self.frame_block_partitions = loaded_data_obj['frame_block_partitions']
self.frame_tile_partitions = loaded_data_obj['frame_tile_partitions']
cc = loaded_data_obj['cc']
self.expected_data_from_parents = loaded_data_obj['expected_data_from_parents']
assert (cc > 0), ": cc calculation incorrect"
else:
if construct_partitions == True:
(self.frame_block_partitions,
self.frame_tile_partitions, cc) = self.construct_slices_and_tiles()
assert (cc > 0), ": cc calculation incorrect"
else:
self.frame_block_partitions = None
self.frame_tile_partitions = None
## task level rts specs - need to calculate
if hevc_cc == None:
self.set_computationCost(cc)
self.set_remainingComputationCost(cc)
self.set_timeLeftTillCompletion(cc)
## related to dep based scheduling ##
if construct_partitions == True:
self.current_processing_unit = {'slice_id':0 , 'ctu_id':0, 'rem_cc': self.frame_block_partitions[0]['ctus'][0]["cc"]} # slice, ctu, rem_cc
#pass
else:
self.current_processing_unit = {'Notimplemented' : 0}
## stuff required for the memory reduction hack
self.hack_abstract__tile_level_block_partitions = {}
self.hack_abstract__num_ctus_in_tile = {}
def __repr__(self):
debug = "<Task "
debug += " type=" + self.type
debug += " id=" + str(self.id)
debug += " cc=" + str(self.computationCost)
debug += " wcc=" + str(self.worstCaseComputationCost)
#debug += " mmc=" + str(self.maxMemConsumption)
debug += " sdt=" + str(self.get_scheduledDispatchTime())
debug += " rt=" + str(self.releaseTime)
#debug += " d=" + str(self.deadline)
#debug += " p=" + str(self.period)
debug += " pri=" + str(self.priority)
debug += " ugid=" + str(self.unique_gop_id)
debug += " ntiles=" + str(self.getNumSubTasksTiles())
#debug += " stat=" + str(self.status)
#debug += " tltc=" + str(self.timeLeftTillCompletion)
#debug += " tltd=" + str(self.timeLeftTillDeadline)
#debug += " st=" + str(self.taskStartTime)
debug += " tct=" + str(self.taskCompleteTime)
#debug += " mdf=" + str(self.missedDeadlineFlag)
debug += " dt=" + str(self.dispatchTime)
# frame specific
#debug += " pgid=" + str(self.parent_gop_id)
#debug += " frtyp=" + str(self.frame_type)
debug += " fr_gop_ix=" + str(self.frame_ix_in_gop )
#debug += " frpr=" + str(self.frame_priority)
debug += " dep=" + str(self.dependencies)
#debug += " fr_dep=" + str(self.frame_dependencies)
#debug += " wh_fnm=" + str(self.which_frames_needs_me)
debug += " wh_tnm=" + str(self.which_tasks_needs_me)
# stream specific
debug += " wfid=" + str(self.wf_id)
debug += " vid=" + str(self.video_stream_id)
debug += " />"
return debug
def _debugLongXML(self):
debug = "<Task "
debug += " type='" + self.type+ "'"
debug += " id='" + str(self.id)+ "'"
debug += " cc='" + str(self.computationCost)+ "'"
debug += " mmc='" + str(self.maxMemConsumption)+ "'"
debug += " rt='" + str(self.releaseTime)+ "'"
debug += " d='" + str(self.deadline)+ "'"
debug += " p='" + str(self.period)+ "'"
debug += " pri='" + str(self.priority)+ "'"
debug += " st='" + str(self.taskStartTime)+ "'"
debug += " tct='" + str(self.taskCompleteTime)+ "'"
debug += " sdt='" + str(self.scheduledDispatchTime)+ "'"
debug += " wcc='" + str(self.worstCaseComputationCost)+ "'"
# frame specific
debug += " pgid='" + str(self.parent_gop_id)+ "'"
debug += " ugid='" + str(self.unique_gop_id)+ "'"
debug += " fr_gop_ix='" + str(self.frame_ix_in_gop )+ "'"
debug += " frpr='" + str(self.frame_priority)+ "'"
debug += " fps='" + str(self.frame_rate)+ "'"
debug += " dep='" + str(self.dependencies)+ "'"
debug += " fr_dep='" + str(self.frame_dependencies)+ "'"
debug += " wh_fnm='" + str(self.which_frames_needs_me)+ "'"
debug += " wh_tnm='" + str(self.which_tasks_needs_me)+ "'"
debug += " data_frm_prn='" + str(self.expected_data_from_parents)+ "'"
debug += " data_to_ch='" + str(self.expected_data_to_children)+ "'"
# stream specific
debug += " wfid='" + str(self.wf_id)+ "'"
debug += " vid='" + str(self.video_stream_id)+ "'"
debug += " res='" + str(self.frame_h)+"x"+str(self.frame_w)+ "'"
debug += " />"
return debug
def _debugShortLabel(self):
debug = "<Task "
debug += " id=" + str(self.id)
debug += " frtyp=" + str(self.frame_type)
debug += " dep=" + str(self.dependencies).replace(","," ")
#debug += " fr_dep=" + str(self.frame_dependencies).replace(","," ")
#debug += " wh_fnm=" + str(self.which_frames_needs_me).replace(","," ")
debug += " wh_tnm=" + str(self.which_tasks_needs_me).replace(","," ")
debug += " ctu=" + (self.getCurrentlyProcessingUnitRef_Label())
return debug
# minimal version of to-string (for schedulability debug)
def getSchedulability_toString(self):
debug = ""
debug += " id='" + str(self.id) + "'"
debug += " gix='" + str(self.frame_ix_in_gop)+ "'"
debug += " wfid='" + str(self.wf_id)+ "'"
debug += " vid='" + str(self.video_stream_id)+ "'"
debug += " pc='" + str(self.get_processingCore())+ "'"
debug += " cc='" + str(self.computationCost) + "'"
debug += " d='" + str(self.deadline)+ "'"
debug += " p='" + str(self.period)+ "'"
debug += " pri='" + str(self.priority)+ "'"
debug += " dt='" + str(self.dispatchTime) + "'"
return debug
def getTaskWFSTRMId(self):
name = "t_" + str(self.wf_id) + "_" + str(self.video_stream_id) + "_" + str(self.frame_ix_in_gop)
return name
# getters
def get_expected_data_from_parents(self):
return self.expected_data_from_parents
def get_expected_data_to_children(self):
return self.expected_data_to_children
def get_frame_tile_partitions(self, tid=None):
if tid == None:
return self.frame_tile_partitions
else:
if tid<len(self.frame_tile_partitions):
return self.frame_tile_partitions[tid]
else:
sys.exit("Error: get_frame_tile_partitions:: invalid tile id")
def get_gop_frame_dec_order_fix(self):
return self.gop_frame_decoding_order[0]
def get_gop_frame_dec_order_ftype(self):
return self.gop_frame_decoding_order[1]
def get_frame_block_partitions(self):
return self.frame_block_partitions
def get_adaptiveGoP_Obj(self):
return self.adaptiveGoP_Obj
def get_video_genre(self):
return self.video_genre
def get_hevc_tile_id(self):
return self.hevc_tile_id
def get_bpyramid_ix(self):
return self.bpyramid_ix
def get_bpyramid_primary_task(self):
return self.bpyramid_primary_task
# setters
def set_expected_data_from_parents(self, d):
self.expected_data_from_parents = d
def set_expected_data_to_children(self, d):
self.expected_data_to_children = d
def update_expected_data_to_children(self, k,v):
self.expected_data_to_children[k]=v
def update_expected_data_from_parent(self, k,v):
self.expected_data_from_parents[k]=v
def set_frame_block_partitions(self, fbp):
self.frame_block_partitions = fbp
def set_frame_tile_partitions(self, ftp):
self.frame_tile_partitions = ftp
def set_hevc_tile_id(self, tid):
self.hevc_tile_id = tid
def set_bpyramid_ix(self, pix):
self.bpyramid_ix = pix
def set_bpyramid_primary_task(self, v):
self.bpyramid_primary_task = v
# assume constant GOP structure
# IBBPBBPBBPBB (display order)
@staticmethod
def calc_FramePriorityInsideGOP(ix, adaptiveGoP_Obj, gopseq="IPBBPBBPBBBB"):
# priorites are set according to topological order
if adaptiveGoP_Obj == None:
return None
else:
gop_len = len(gopseq)
decoding_order_ix = adaptiveGoP_Obj.getDecodingOrder()[0]
priority_order = [-1]*len(gopseq)
for each_frame_ix, eachframe_type in enumerate(gopseq):
dec_ix = decoding_order_ix.index(each_frame_ix)
priority_order[each_frame_ix] = (gop_len-dec_ix)
return priority_order[ix]
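    # Worked example (hypothetical decoding order): for gopseq "IPBB" decoded
    # in the order I, P, B, B the decoding indices are 0, 1, 2, 3, so the
    # priorities become gop_len - dec_ix = 4, 3, 2, 1, i.e. earlier-decoded
    # frames receive the higher priority.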
# each frame in the GOP has dependencies
# I -> none
# P -> I or P (1 ref frame)
# B -> I/P/B frames (always at least 2 ref frames)
# nx_DG (networkX dependency graph)
def calc_FrameDependencies(self, gop_ix, task_id):
dependencies = {
'dep_gop_frames_ixs' : None,
'dep_task_ids' : None,
'which_task_needs_current_task' : None,
'which_frame_needs_current_frame' : None,
'my_closest_children' : None,
'my_closest_parent' : None,
'non_dep_frames' : None,
'possible_interfering_frames' : None,
}
return dependencies
def populate_FrameDependencies(self, AGOP_obj, gop_ix):
':type AGOP_obj: AdaptiveGoPGenerator'
self.frame_dependencies = AGOP_obj.get_gop_level_dep()[gop_ix]
self.set_dependencies(AGOP_obj.get_task_level_dep()[gop_ix]) # dependent tasks
self.which_frames_needs_me = AGOP_obj.get_which_frame_needs_current_frame()[gop_ix]
self.which_tasks_needs_me = AGOP_obj.get_which_task_needs_current_task()[gop_ix]
self.my_closest_children = AGOP_obj.get_my_closest_children()[gop_ix]
self.my_closest_parent = AGOP_obj.get_my_closest_parent()[gop_ix]
self.non_dep_frames = None
self.possible_interfering_frame = AGOP_obj.get_possible_interfering_frames()[gop_ix]
for each_p_tid in AGOP_obj.get_task_level_dep()[gop_ix]:
self.outstanding_deps_parent_tids[each_p_tid] = None
def calc_encoded_mpegframe_size(self, param_precalc_value=None):
size=0.0
if (SimParams.HEVC_GOPGEN_USEPROBABILISTIC_MODEL==True and self.video_genre!= None):
if param_precalc_value != None:
size = np.random.choice(param_precalc_value[self.frame_type])
return size
else:
PRE_SAMPLE_RANGE = 100
fr_k = self.frame_type + "-Fr"
fr_minmax_k = self.frame_type + "-Fr-minmax"
frsize_minmax = HEVCWLP.HEVCWLPARAMS_IPB_FR_SIZE_PARAMS[self.video_genre][fr_minmax_k]
frsize_range = np.linspace(frsize_minmax[0], frsize_minmax[1], PRE_SAMPLE_RANGE)
frsize_expweib_params = HEVCWLP.HEVCWLPARAMS_IPB_FR_SIZE_PARAMS[self.video_genre][fr_k]
pdf_y = exponweib.pdf(frsize_range,
frsize_expweib_params[0], frsize_expweib_params[1],
scale = frsize_expweib_params[3],
loc = frsize_expweib_params[2])*1.0
probabilities = pdf_y
# replace NaN with the closest non-NaN
mask = np.isnan(probabilities)
probabilities[mask] = np.interp(np.flatnonzero(mask),
np.flatnonzero(~mask),
probabilities[~mask])
# replace 'inf' with the closest non-inf
mask = np.isinf(probabilities)
probabilities[mask] = np.interp(np.flatnonzero(mask),
np.flatnonzero(~mask),
probabilities[~mask])
# calculate normalisation
norm_probabilties = np.array(probabilities)/np.sum(probabilities)
distrib = rv_discrete(values=(np.arange(len(frsize_range)), norm_probabilties))
# checking
if np.isnan(norm_probabilties).any():
print norm_probabilties
print probabilities
sys.exit("calc_encoded_mpegframe_size:: contains NaN")
compression_ratio_index = distrib.rvs(size=1)[0]
compression_ratio = frsize_range[compression_ratio_index]
assert (compression_ratio > 0), "calc_encoded_mpegframe_size :: compression ratio is zero"
assert (compression_ratio < 1), "calc_encoded_mpegframe_size :: compression ratio is larger than 1"
size = float(self.frame_w * self.frame_h * 3) * compression_ratio # in bytes
else:
if self.frame_type == "I": size = float(self.frame_w * self.frame_h * 3) * SimParams.HEVC_COMPRESSION_RATIO_IFRAME
elif self.frame_type == "P": size = float(self.frame_w * self.frame_h * 3) * SimParams.HEVC_COMPRESSION_RATIO_PFRAME
elif self.frame_type == "B": size = float(self.frame_w * self.frame_h * 3) * SimParams.HEVC_COMPRESSION_RATIO_BFRAME
return size
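    # Illustrative arithmetic (hypothetical values): a 1280x720 frame with a
    # sampled compression ratio of 0.1 gives 1280 * 720 * 3 * 0.1 = 276480
    # bytes as the encoded frame size.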
@staticmethod
def getPrecalc_ProbabilisticFramesize(fr_h, fr_w, vid_genre, sample_size=1000):
fr_types = ["I", "P", "B"]
result = {}
for each_frtype in fr_types:
PRE_SAMPLE_RANGE = sample_size
fr_k = each_frtype + "-Fr"
fr_minmax_k = each_frtype + "-Fr-minmax"
frsize_minmax = HEVCWLP.HEVCWLPARAMS_IPB_FR_SIZE_PARAMS[vid_genre][fr_minmax_k]
frsize_range = np.linspace(frsize_minmax[0], frsize_minmax[1], PRE_SAMPLE_RANGE)
frsize_expweib_params = HEVCWLP.HEVCWLPARAMS_IPB_FR_SIZE_PARAMS[vid_genre][fr_k]
pdf_y = exponweib.pdf(frsize_range,
frsize_expweib_params[0], frsize_expweib_params[1],
scale = frsize_expweib_params[3],
loc = frsize_expweib_params[2])*1.0
probabilities = pdf_y
            # replace NaN with the closest non-NaN
mask = np.isnan(probabilities)
probabilities[mask] = np.interp(np.flatnonzero(mask),
np.flatnonzero(~mask),
probabilities[~mask])
norm_probabilties = np.array(probabilities)/np.sum(probabilities)
distrib = rv_discrete(values=(frsize_range, norm_probabilties))
if np.isnan(norm_probabilties).any():
print norm_probabilties
print probabilities
sys.exit("calc_encoded_mpegframe_size:: contains NaN")
compression_ratio_index = distrib.rvs(size=sample_size)
compression_ratio = frsize_range[compression_ratio_index]
size_arr = float(fr_w * fr_h) * compression_ratio
result[each_frtype] = size_arr
return result
def calc_WhichFramesToDrop(self, frame_nodecode_ix):
sys.exit("Error: HEVCFrameTask::calc_WhichFramesToDrop:: not implemented yet !")
def calc_ASAP_ReleaseTime(self, ftype, frame_nodecode_ix, time_now, gop_frames):
sys.exit("Error: HEVCFrameTask::calc_ASAP_ReleaseTime:: not implemented yet !")
# the cost of the computation depends on the number+type of different slices
def calc_FrameComputationTime(self):
total_ct = 0.0
# (TEMPSOLUTION) : comp.cost is calculated at a PU level
for each_slice_v in self.frame_block_partitions.values():
total_ct += np.sum([each_ctu_v['cc'] for each_ctu_v in each_slice_v['ctus'].values()])
return total_ct
def construct_slices_and_tiles(self):
# construct the blocks and slice to tile mapping
(block_partitions, temp_total_frame_slicelvl_cc) = self.construct_block_partitions(self.interleaved_slice_types)
tile_partitions = self.slice_to_tile_mapping(self.interleaved_slice_types)
return (block_partitions, tile_partitions, temp_total_frame_slicelvl_cc)
# this is used to split the slices into PUs
def construct_block_partitions(self, interleaved_slice_types):
frame_res = self.frame_h*self.frame_w
block_partitions = OrderedDict()
# keep track of the computation cost per ctu
temp_total_frame_slicelvl_cc = 0.0
if (SimParams.HEVC_GOPGEN_USEPROBABILISTIC_MODEL==True):
pregen_cu_cc = self._getCUComputationCostPregen_ProbModel(1000, self.video_genre, interleaved_slice_types[0])
# randomly allocate blocks to slices
ctu_id = 0
slice_id = 0
for num_ctu, each_slice in zip(self.num_ctu_per_slice, self.interleaved_slice_types):
block_partitions[slice_id] = OrderedDict()
if (SimParams.HEVC_GOPGEN_USEPROBABILISTIC_MODEL==True):
(ctus_dict,
total_frame_slicevl_cc,
new_ctu_id) = self._generate_PU_per_CTU_fromPreloadedData_ProbabilisticModel(self.video_genre, each_slice, slice_id,
frame_res, num_ctu, ctu_id, pregen_cu_cc)
#print "finished _generate_PU_per_CTU_fromPreloadedData_ProbabilisticModel: ", self.frame_ix_in_gop
else:
(ctus_dict,
total_frame_slicevl_cc,
new_ctu_id) = self._generate_PU_per_CTU_fromPreloadedData(each_slice, slice_id, frame_res, num_ctu, ctu_id)
block_partitions[slice_id] = ctus_dict
temp_total_frame_slicelvl_cc += total_frame_slicevl_cc
ctu_id = new_ctu_id
slice_id +=1
return (block_partitions, temp_total_frame_slicelvl_cc)
# allocate slices to tiles - assume equal partitions (for now)
def slice_to_tile_mapping(self, interleaved_slice_types):
total_slices = len(interleaved_slice_types)
        assert(total_slices >= self.number_of_tiles), "too few slices for the number of tiles"
num_slices_per_tile = float(total_slices)/float(self.number_of_tiles)
if (num_slices_per_tile %1) != 0: # not a fair proportion
rounddown_slice_tile_allocation = int(num_slices_per_tile)
rounddown_total_slices = self.number_of_tiles*rounddown_slice_tile_allocation
leftover_slices = int(total_slices - rounddown_total_slices)
assert(leftover_slices>0)
# assign the equal part
num_equal_slices_per_tile = rounddown_slice_tile_allocation
slice_tile_allocation = [ {'slice_types' : interleaved_slice_types[x:x+num_equal_slices_per_tile],
'slice_ixs' : range(x,x+num_equal_slices_per_tile)}
for x in xrange(0, rounddown_total_slices, num_equal_slices_per_tile)]
# assign the leftover slices
for each_leftover_slice in xrange(leftover_slices):
rand_slice_ix = np.random.randint(0,self.number_of_tiles)
slice_tile_allocation[rand_slice_ix]['slice_types'].append(interleaved_slice_types[rounddown_total_slices+each_leftover_slice])
slice_tile_allocation[rand_slice_ix]['slice_ixs'].append(rounddown_total_slices+each_leftover_slice)
else: # fair proportion
num_slices_per_tile = int(num_slices_per_tile)
slice_tile_allocation = [ {'slice_types' : interleaved_slice_types[x:x+num_slices_per_tile],
'slice_ixs' : range(x,x+num_slices_per_tile)}
for x in xrange(0, len(interleaved_slice_types), num_slices_per_tile)]
return slice_tile_allocation
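    # Worked example (hypothetical counts): with 10 slices and 4 tiles,
    # num_slices_per_tile = 2.5, so each tile first receives 2 slices (8 in
    # total) and the 2 leftover slices are appended to randomly chosen tiles.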
    # generate all the CTUs for the given slice
# pu and cu terminology interchangeable in this function
def _generate_PU_per_CTU_fromPreloadedData_ProbabilisticModel(self, vid_genre, slice_type, slice_id,
frame_res, num_ctus, ctu_id, pregen_cu_cc):
ctu_size = SimParams.HEVC_CTU_SIZE # we need to split this up into the quad tree structure
sum_pu_cc = 0.0
mem_size = 0.0
theoretical_max_num_ctus = int(round(float((ctu_size)*num_ctus)/float(4*4)))
if(slice_type == "Is"): possible_cu_type = ["ICU"]
elif(slice_type == "Ps"): possible_cu_type = ["ICU", "PCU", "SkipCU"]
else: possible_cu_type = ["ICU", "PCU", "BCU", "SkipCU"]
if(slice_type == "Is") : slice_pred_type = "Intra"
else: slice_pred_type = "Inter"
fr_type_str = slice_type.replace('s','') + "-fr"
cu_type_list = ["ICU", "PCU", "BCU", "SkipCU"]
cu_type_probabilities = HEVCWLP.HEVCWLPARAMS_CU_TYPE_PROBABILITIES[vid_genre][fr_type_str]
varied_cu_type_probabilities = varyProbabilities_NormalDist(cu_type_probabilities)
#print varied_cu_type_probabilities
count_ctu_id = ctu_id
total_frame_slicelvl_cc = 0.0
result_ctus = {'sltype' : slice_type, 'ctus' : OrderedDict()}
# randomly select PU size, numctus, etc from loaded data
#all_selected_pus = np.random.choice(DataPreloader.hevc_random_ctu[vid_genre][slice_type], size=num_ctus) # @UndefinedVariable
#iter_all_available_pus = itertools.cycle(DataPreloader.hevc_random_ctu[vid_genre][slice_type])
ctu_list = getCTUsForVideo(vid_genre, slice_pred_type, total_video_pixels=frame_res, force_ctu_size=True).values()
np.random.shuffle(ctu_list)
iter_all_available_pus = itertools.cycle(ctu_list)
#pregen_cu_cc = pregen_cu_cc
#pregen_cu_dep_indexes = self._getCULevelRefFrame_ProbModel(theoretical_max_num_ctus+100)
distrib = rv_discrete(values=(np.arange(len(cu_type_list)), varied_cu_type_probabilities))
tmp_cutypes_ixs = distrib.rvs(size=1000)
np.random.shuffle(tmp_cutypes_ixs)
pregen_rand_cutypes_ixs = itertools.cycle(tmp_cutypes_ixs)
# if there are more than 2 ref frames - need to decide which to pick for B-frames
if slice_type == "Bs":
if len(self.frame_dependencies) > 2:
# b_rf_ixs = [i for i, f in enumerate(self.frame_dependencies) if self.gop_structure[f] == "B"]
# for i in xrange(1000):
# b_fr_ix = np.random.choice([i for i, f in enumerate(self.frame_dependencies) if self.gop_structure[f] == "B"])
# p_fr_ix = np.random.choice([i for i, f in enumerate(self.frame_dependencies) if self.gop_structure[f] == "P"])
#
# #print b_fr_ix, p_fr_ix
# ref_fwdbwd_pairs.append([p_fr_ix, b_fr_ix])
#
# pregen_rand_rfixs = itertools.cycle(ref_fwdbwd_pairs)
#
# else:
# pregen_rand_rfixs = itertools.cycle([[0,1]])
ref_fwdbwd_pairs = []
for i in xrange(1000):
fwd_rnd_rfix = np.random.choice(range(len(self.frame_dependencies)))
bwd_rnd_rfix = np.random.choice([x for x in range(len(self.frame_dependencies)) if x != fwd_rnd_rfix])
ref_fwdbwd_pairs.append([fwd_rnd_rfix, bwd_rnd_rfix])
pregen_rand_rfixs = itertools.cycle(ref_fwdbwd_pairs)
else:
pregen_rand_rfixs = itertools.cycle([[0,1]])
#
# we need to decide if Skip will have bwd refs or not, this boolean array will be used
# x% of the time skip will have bwd refs pregenerated for speed
rand_skip_bwd_select = []
if slice_type == "Bs":
for i in xrange(1000):
if np.random.rand() < 0.5: rand_skip_bwd_select.append(True)
else: rand_skip_bwd_select.append(False)
pregen_rand_skip_bwd_select = itertools.cycle(rand_skip_bwd_select)
#pregen_cu_cc_Icu = np.random.choice(pregen_cu_cc["ICU"], size=theoretical_max_num_ctus+100) # @UndefinedVariable
#pregen_cu_cc_Pcu = np.random.choice(pregen_cu_cc["PCU"], size=theoretical_max_num_ctus+100) # @UndefinedVariable
#pregen_cu_cc_Bcu = np.random.choice(pregen_cu_cc["BCU"], size=theoretical_max_num_ctus+100) # @UndefinedVariable
#pregen_cu_cc_Skipcu = np.random.choice(pregen_cu_cc["SkipCU"], size=theoretical_max_num_ctus+100) # @UndefinedVariable
##### testing ####
# pregen_cu_cc = {
# "ICU" : [0.001] * (theoretical_max_num_ctus+100),
# "PCU" : [0.001] * (theoretical_max_num_ctus+100),
# "BCU" : [0.001] * (theoretical_max_num_ctus+100),
# "SkipCU" : [0.001] * (theoretical_max_num_ctus+100),
# }
# pregen_cu_dep_indexes = [0] * (theoretical_max_num_ctus+100)
# pregen_rand_cutypes_ixs = [0]*(theoretical_max_num_ctus+100)
#
######################
#pprint.pprint(pregen_cu_cc)
tmp_all_rand_pu_types = []
tmp_all_rand_pu_sizes = []
# all ctus in the slice
count_cu = 0
#for each_pu_sizes in all_selected_pus :
for i_ctu in xrange(num_ctus):
#self._verify_pu_sizes(ctu_size, selected_pu_sizes)
each_pu_sizes = iter_all_available_pus.next()
#each_pu_sizes = np.random.choice(ctu_list)
#selected_pu_sizes = each_pu_sizes['pu_list']
#assert (np.sum([p[0]*p[1] for p in selected_pu_sizes]) == SimParams.HEVC_CTU_SIZE)
selected_pu_sizes = each_pu_sizes
assert (np.sum([p*p for p in selected_pu_sizes]) == SimParams.HEVC_CTU_SIZE)
cum_pix = 0 # cumulative pixels count
for each_pu_ix, each_pu in enumerate(selected_pu_sizes):
#selected_pu_size = each_pu
selected_pu_size = [each_pu, each_pu]
selected_pu_pix = selected_pu_size[0]*selected_pu_size[1]
decoded_pu_size = (selected_pu_pix) * 3 # 3 bytes a pixel
tmp_cutype_ix = pregen_rand_cutypes_ixs.next()
rand_pu_type = cu_type_list[tmp_cutype_ix]
tmp_all_rand_pu_types.append(tmp_cutype_ix)
tmp_all_rand_pu_sizes.append(selected_pu_size[0])
assert(rand_pu_type in possible_cu_type), \
"_generate_PU_per_CTU_fromPreloadedData_ProbabilisticModel :: invalid CU type selected: <%s>, <%s>, <%s>"% \
(slice_type, rand_pu_type, possible_cu_type.__repr__())
# for optimisation reasons we are expanding something
# that could be put into a function (nb: python function overhead is large)
if (rand_pu_type=="ICU"):
dep = {'fwd': [], 'bwd':[]}
rand_pu_wcc = pregen_cu_cc["ICU"].next()
elif (rand_pu_type=="PCU"):
if slice_type == "Bs":
f_b = pregen_rand_rfixs.next()
dep = {'fwd': [self.frame_dependencies[f_b[0]], self.dependencies[f_b[0]]], 'bwd': [] }
else:
dep = {'fwd': [self.frame_dependencies[0], self.dependencies[0]], 'bwd': [] }
rand_pu_wcc = pregen_cu_cc["PCU"].next()
elif (rand_pu_type=="BCU"):
# random_ixs = np.random.randint(len(self.frame_dependencies), size=2)
# dep = {'fwd': [self.frame_dependencies[random_ixs[0]], self.dependencies[random_ixs[0]]],
# 'bwd': [self.frame_dependencies[random_ixs[1]], self.dependencies[random_ixs[1]]]
# }
f_b = pregen_rand_rfixs.next()
dep = {'fwd': [self.frame_dependencies[f_b[0]], self.dependencies[f_b[0]]],
'bwd': [self.frame_dependencies[f_b[1]], self.dependencies[f_b[1]]]
}
rand_pu_wcc = pregen_cu_cc["BCU"].next()
elif (rand_pu_type=="SkipCU") and (slice_type=="Ps"):
dep = {'fwd': [self.frame_dependencies[0], self.dependencies[0]], 'bwd': [] }
rand_pu_wcc = pregen_cu_cc["SkipCU"].next()
elif (rand_pu_type=="SkipCU") and (slice_type=="Bs"):
#random_ixs = np.random.randint(len(self.frame_dependencies), size=2)
f_b = pregen_rand_rfixs.next()
if pregen_rand_skip_bwd_select.next() == True:
dep = {'fwd': [self.frame_dependencies[f_b[0]], self.dependencies[f_b[0]]],
'bwd': []
}
else: # has both bwd and fwd
dep = {'fwd': [self.frame_dependencies[f_b[0]], self.dependencies[f_b[0]]],
'bwd': [self.frame_dependencies[f_b[1]], self.dependencies[f_b[1]]]
}
rand_pu_wcc = pregen_cu_cc["SkipCU"].next()
#cc = rand_pu_wcc * SimParams.CPU_EXEC_SPEED_RATIO
#pprint.pprint(HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR)
cc = rand_pu_wcc * np.random.uniform(HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR[rand_pu_type][0],
HEVCWLP.HEVCWLPARAMS_SCALE_FACTOR[rand_pu_type][1])
assert(cc != 0) , "PU size is zero"
sum_pu_cc += cc
cum_pix += selected_pu_pix
# here we fill the dependency info - from parents
self._populate_expected_data_from_parents(slice_id, dep, decoded_pu_size)
count_cu+=1
#############################################################
# update tracking info - only for validation #
if self.enable_workload_validation == True:
self.trackvalidate_prop_cu_sizes[selected_pu_size[0]]+=1
self.trackvalidate_prop_cu_types[rand_pu_type]+=1
self.trackvalidate_cu_dectime[rand_pu_type+"_cc"].append(cc)
if dep['fwd'] != []:
gop_ix = dep['fwd'][0] # 0 is gop level ix
k = self.gop_structure[gop_ix] + str(gop_ix)
if k not in self.trackvalidate_reffrdata:
self.trackvalidate_reffrdata[k] = decoded_pu_size
else:
self.trackvalidate_reffrdata[k] += decoded_pu_size
if dep['bwd'] != []:
gop_ix = dep['bwd'][0]
k = self.gop_structure[gop_ix] + str(gop_ix)
if k not in self.trackvalidate_reffrdata:
self.trackvalidate_reffrdata[k] = decoded_pu_size
else:
self.trackvalidate_reffrdata[k] += decoded_pu_size
#############################################################
assert (cum_pix==SimParams.HEVC_CTU_SIZE)
result_ctus['ctus'][count_ctu_id] = {
#-- most important property --
"cc" : sum_pu_cc,
#-- other (optional) properties --
#"slice_type" : slice_type,
#"pu_list" : result_pus,
#'status' : HEVCProcessingStatus.HEVC_TPROCSTAT_CTU_LEVEL_INIT,
#'deps' : pu_deps_lbl,
}
total_frame_slicelvl_cc+= result_ctus['ctus'][count_ctu_id]["cc"]
count_ctu_id+=1
sum_pu_cc = 0.0
mem_size += (sys.getsizeof(result_ctus) / 1000.0) # mem prof.
self.verify_num_ctus_generated +=1
#print "np.random.rand() : ", np.random.rand()
#print total_frame_slicelvl_cc
# f = plt.figure()
# data = [v['cc'] for k,v in result_ctus['ctus'].iteritems()]
# mu_data = np.mean(data)
# plt.plot(data)
# plt.hold(True)
# plt.axhline(y=mu_data, color='r')
# plt.show()
return (result_ctus, total_frame_slicelvl_cc, count_ctu_id)
def _getCUComputationCostPregen_ProbModel(self, size_per_cutype, vid_genre, slice_type):
plot_hist = False
possible_cu_type = ["ICU", "PCU", "BCU", "SkipCU"]
PRE_SAMPLE_RANGE = 1000
POST_SAMPLE_RANGE = size_per_cutype
ipb_cu_cc_params = HEVCWLP.HEVCWLPARAMS_IPB_CU_DECT_PARAMS[vid_genre]
result_cu_cc = {
'ICU': [],
'PCU': [],
'BCU': [],
'SkipCU': [],
}
for each_cu in possible_cu_type:
if (each_cu in ["ICU", "PCU", "BCU"]): # for I/P/B CU
minmax_lbl = each_cu+"-minmax"
cc_range = np.linspace(ipb_cu_cc_params[minmax_lbl][0],
ipb_cu_cc_params[minmax_lbl][1],
PRE_SAMPLE_RANGE)
pdf_y = exponweib.pdf(cc_range,
ipb_cu_cc_params[each_cu][0],
ipb_cu_cc_params[each_cu][1],
scale = ipb_cu_cc_params[each_cu][2],
loc = ipb_cu_cc_params[each_cu][3])
norm_pdf_y = np.array(pdf_y)/np.sum(pdf_y) # normalised
distrib = rv_discrete(values=(np.arange(len(cc_range)), norm_pdf_y))
tmp_indeces = distrib.rvs(size=POST_SAMPLE_RANGE)
result_cu_cc[each_cu] = np.take(cc_range, tmp_indeces)
else: # for Skip CU
minmax_lbl = each_cu+"-minmax"
cc_minmax = HEVCWLP.HEVCWLPARAMS_SKIP_CU_DECT_PARAMS(0, vid_genre, return_minmax=True)[1]
# if slice_type == "P":
# cc_minmax[1] = cc_minmax[1]*0.80
cc_range = np.linspace(cc_minmax[0], cc_minmax[1], PRE_SAMPLE_RANGE)
skip_cu_cc_probs = [HEVCWLP.HEVCWLPARAMS_SKIP_CU_DECT_PARAMS(x, vid_genre)
for x in cc_range]
norm_skip_cu_cc_probs = np.array(skip_cu_cc_probs)/np.sum(skip_cu_cc_probs)
distrib = rv_discrete(values=(np.arange(len(cc_range)), norm_skip_cu_cc_probs))
tmp_indeces = distrib.rvs(size=POST_SAMPLE_RANGE)
result_cu_cc[each_cu] = np.take(cc_range, tmp_indeces)
# temporary plot distribution - testing
if plot_hist == True:
f, axarr = plt.subplots(4, sharex=True)
axarr[0].hist(result_cu_cc['ICU'], bins=30)
axarr[0].ticklabel_format(style='sci', axis='both', scilimits=(0,0))
axarr[1].hist(result_cu_cc['PCU'], bins=30)
axarr[1].ticklabel_format(style='sci', axis='both', scilimits=(0,0))
axarr[2].hist(result_cu_cc['BCU'], bins=30)
axarr[2].ticklabel_format(style='sci', axis='both', scilimits=(0,0))
axarr[3].hist(result_cu_cc['SkipCU'], bins=30)
axarr[3].ticklabel_format(style='sci', axis='both', scilimits=(0,0))
plt.show()
np.random.shuffle(result_cu_cc["ICU"])
np.random.shuffle(result_cu_cc["PCU"])
np.random.shuffle(result_cu_cc["BCU"])
np.random.shuffle(result_cu_cc["SkipCU"])
# convert to iterators
result_cu_cc['ICU'] = itertools.cycle(result_cu_cc['ICU'])
result_cu_cc['PCU'] = itertools.cycle(result_cu_cc['PCU'])
result_cu_cc['BCU'] = itertools.cycle(result_cu_cc['BCU'])
result_cu_cc['SkipCU'] = itertools.cycle(result_cu_cc['SkipCU'])
return result_cu_cc
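    # Note on the sampling approach above (explanatory comment, not from the
    # original source): the continuous exponentiated-Weibull curve is discretised
    # onto PRE_SAMPLE_RANGE points, normalised so the probabilities sum to 1,
    # and wrapped in scipy.stats.rv_discrete so that rvs() draws indices into
    # cc_range with those probabilities; np.take() then maps the sampled
    # indices back to actual cycle-cost values.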
def _getCULevelRefFrame_ProbModel(self, max_num_cus):
fr_type = self.frame_type
fr_lbl_k = fr_type + str(self.frame_ix_in_gop)
fwd_fr_refs = self.adaptiveGoP_Obj.get_frameLevelRefs()[fr_lbl_k]['fwd_pred']
bwd_fr_refs = self.adaptiveGoP_Obj.get_frameLevelRefs()[fr_lbl_k]['bwd_pred']
if fwd_fr_refs==None:
fwd_fr_refs=[]
if bwd_fr_refs==None:
bwd_fr_refs=[]
prob_order = {"I":0, "P":1, "B":2}
assert (len(fwd_fr_refs) + len(bwd_fr_refs)) == len(self.frame_dependencies), "_getCULevelRefFrame:: major Error ! frame refs"
# get probabilities
fwd_fr_probs = []
bwd_fr_probs = []
for rf_fr in fwd_fr_refs:
tmp_ix = prob_order[rf_fr['fr']]
p = HEVCWLP.HEVCWLPARAMS_REFFR_SELECTION_PROBABILITIES[fr_type][tmp_ix]
fwd_fr_probs.append(p)
for rf_fr in bwd_fr_refs:
tmp_ix = prob_order[rf_fr['fr']]
p = HEVCWLP.HEVCWLPARAMS_REFFR_SELECTION_PROBABILITIES[fr_type][tmp_ix]
bwd_fr_probs.append(p)
# normalise probabilities
if len(fwd_fr_probs)>0:
fwd_fr_probs = np.array(fwd_fr_probs)
norm_fwd_fr_probs = fwd_fr_probs/np.sum(fwd_fr_probs)
if len(bwd_fr_probs)>0:
bwd_fr_probs = np.array(bwd_fr_probs)
norm_bwd_fr_probs = bwd_fr_probs/np.sum(bwd_fr_probs)
# select which ref frame for both (fwd , bwd) - for multiple CUs
result_self_frdep_ix_fwd = []
result_self_frdep_ix_bwd = []
for i in range(max_num_cus):
if len(fwd_fr_refs)>0:
distrib_fwd = rv_discrete(values=(np.arange(len(fwd_fr_refs)), norm_fwd_fr_probs))
tmp_fwd_rf_ix = distrib_fwd.rvs(size=1)[0]
fwd_rf_rf_ix = fwd_fr_refs[tmp_fwd_rf_ix]['frix']
self_frdep_ix_fwd = self.frame_dependencies.index(fwd_rf_rf_ix)
result_self_frdep_ix_fwd.append(self_frdep_ix_fwd)
if len(bwd_fr_refs)>0:
distrib_bwd = rv_discrete(values=(np.arange(len(bwd_fr_refs)), norm_bwd_fr_probs))
tmp_bwd_rf_ix = distrib_bwd.rvs(size=1)[0]
bwd_rf_rf_ix = bwd_fr_refs[tmp_bwd_rf_ix]['frix']
self_frdep_ix_bwd = self.frame_dependencies.index(bwd_rf_rf_ix)
result_self_frdep_ix_bwd.append(self_frdep_ix_bwd)
final_result = {
'result_self_frdep_ix_fwd' : result_self_frdep_ix_fwd,
'result_self_frdep_ix_bwd' : result_self_frdep_ix_bwd
}
return final_result
# result_ixs = {'fwd': [], 'bwd':[]}
#
# ## target : I-frame ##
# if fr_type == "I":
# result_ixs = {'fwd': [], 'bwd':[]}
#
# ## target : P-frame ##
# elif fr_type == "P":
# if len(self.frame_dependencies) == 0:
# sys.exit("_getCULevelRefFrame:: Error - P fr but no refs - something wrong")
# elif len(self.frame_dependencies) == 1: # no choice
# result_ixs = {'fwd': [self.frame_dependencies[0], self.dependencies[0]], 'bwd':[]}
# elif len(self.frame_dependencies) > 1: # we have choices
# result_ixs = {'fwd': [self.frame_dependencies[self_frdep_ix_fwd], self.dependencies[self_frdep_ix_fwd]], 'bwd':[]}
# else:
# pass
#
# ## target : B-frame ##
# elif fr_type == "B":
# if len(self.frame_dependencies) == 0:
# sys.exit("_getCULevelRefFrame:: Error - B fr but no refs - something wrong")
# elif len(self.frame_dependencies) == 1: # no choice
# sys.exit("_getCULevelRefFrame:: Error - B fr but only 1 refs - something wrong")
# elif len(self.frame_dependencies) == 2: # we have choices
# result_ixs = {'fwd': [self.frame_dependencies[0], self.dependencies[0]],
# 'bwd': [self.frame_dependencies[1], self.dependencies[1]]
# }
# elif len(self.frame_dependencies) > 2: # we have choices
# result_ixs = {'fwd': [self.frame_dependencies[self_frdep_ix_fwd], self.dependencies[self_frdep_ix_fwd]],
# 'bwd': [self.frame_dependencies[self_frdep_ix_bwd], self.dependencies[self_frdep_ix_bwd]]
# }
# else:
# pass
#
# else:
# pass
#
#
# return result_ixs
    # generate all the CTUs for the given slice
def _generate_PU_per_CTU_fromPreloadedData(self, slice_type, slice_id, frame_res, num_ctus, ctu_id):
ctu_size = SimParams.HEVC_CTU_SIZE # we need to split this up into the quad tree structure
sum_pu_cc = 0.0
mem_size = 0.0
if(slice_type == "Is"): pu_type = ["Ipu"]
elif(slice_type == "Ps"): pu_type = ["Ipu", "Ppu", "Ppu", "Ppu"]
else: pu_type = ["Ipu", "Ppu", "Bpu", "Bpu"]
count_ctu_id = ctu_id
total_frame_slicelvl_cc = 0.0
result_ctus = {'sltype' : slice_type, 'ctus' : OrderedDict()}
# randomly select PU size, numctus, etc from loaded data
all_random_ixs = np.random.randint(0,len(DataPreloader.hevc_random_ctu[frame_res][slice_type]), size=num_ctus)
range_num_ctus = range(num_ctus)
all_selected_pus = [DataPreloader.hevc_random_ctu[frame_res][slice_type][ix]['pu_list'] for ix in all_random_ixs]
# all ctus in the slice
for each_ctu, ix, selected_pu_sizes in zip(range_num_ctus,all_random_ixs, all_selected_pus) :
#self._verify_pu_sizes(ctu_size, selected_pu_sizes)
# horrible but faster
if len(pu_type)>1:
temp_rand_pu_ixs = np.random.randint(len(pu_type),size=len(selected_pu_sizes))
else:
temp_rand_pu_ixs = [0]* len(selected_pu_sizes)
assert (np.sum([p[0]*p[1] for p in selected_pu_sizes]) == SimParams.HEVC_CTU_SIZE)
cum_pix = 0 # cumulative pixels count
for each_pu_ix, each_pu in enumerate(selected_pu_sizes):
selected_pu_size = each_pu
selected_pu_pix = selected_pu_size[0]*selected_pu_size[1]
decoded_pu_size = (selected_pu_pix) * 3 # 3 bytes a pixel
rand_pu_type = pu_type[temp_rand_pu_ixs[each_pu_ix]]
# for optimisation reasons (nb: python function overhead is large)
if (rand_pu_type=="Ipu"):
dep = {'fwd': [], 'bwd':[]}
elif (rand_pu_type=="Ppu"):
dep = {'fwd': [self.frame_dependencies[0], self.dependencies[0]], 'bwd': [] }
elif (rand_pu_type=="Bpu"):
random_ixs = np.random.randint(len(self.frame_dependencies), size=2)
dep = {'fwd': [self.frame_dependencies[random_ixs[0]], self.dependencies[random_ixs[0]]],
'bwd': [self.frame_dependencies[random_ixs[1]], self.dependencies[random_ixs[1]]]
}
rand_pu_wcc = np.random.uniform(low=SimParams.HEVC_FIXED_BLOCK_WCCC[rand_pu_type][0],
high=SimParams.HEVC_FIXED_BLOCK_WCCC[rand_pu_type][1])
cc = rand_pu_wcc * SimParams.CPU_EXEC_SPEED_RATIO
assert(cc != 0) , "PU size is zero"
sum_pu_cc += cc
cum_pix += selected_pu_pix
# here we fill the dependency info - from parents
self._populate_expected_data_from_parents(slice_id, dep, decoded_pu_size)
assert (cum_pix==SimParams.HEVC_CTU_SIZE)
result_ctus['ctus'][count_ctu_id] = {
#-- most important property --
"cc" : sum_pu_cc,
#-- other (optional) properties --
#"slice_type" : slice_type,
#"pu_list" : result_pus,
#'status' : HEVCProcessingStatus.HEVC_TPROCSTAT_CTU_LEVEL_INIT,
#'deps' : pu_deps_lbl,
}
total_frame_slicelvl_cc+= result_ctus['ctus'][count_ctu_id]["cc"]
count_ctu_id+=1
sum_pu_cc=0.0
mem_size += (sys.getsizeof(result_ctus) / 1000.0) # mem prof.
self.verify_num_ctus_generated +=1
return (result_ctus, total_frame_slicelvl_cc, count_ctu_id)
def _populate_expected_data_from_parents(self, slice_id, dep, decoded_pu_size):
# populate expected_data_from_parents
if (dep['fwd'] != []):
fwddep_task_id = dep['fwd'][1]
if fwddep_task_id not in self.expected_data_from_parents:
self.expected_data_from_parents[fwddep_task_id] = decoded_pu_size
else:
self.expected_data_from_parents[fwddep_task_id] += decoded_pu_size
if (dep['bwd'] != []):
bwddep_task_id = dep['bwd'][1]
if bwddep_task_id not in self.expected_data_from_parents:
self.expected_data_from_parents[bwddep_task_id] = decoded_pu_size
else:
self.expected_data_from_parents[bwddep_task_id] += decoded_pu_size
def calc_deps_as_ratio_of_frame_num_pixs(self):
assert (self.verify_num_ctus_generated == np.sum(self.num_ctu_per_slice))
total_pix_data = (self.frame_h * self.frame_w) *3
total_ctu_pix_data = np.sum(self.num_ctu_per_slice)*SimParams.HEVC_CTU_SIZE*3
data_per_parent = {}
for each_parent_tid, data_size in self.expected_data_from_parents.iteritems():
data_per_parent[each_parent_tid] = (float(data_size)/float(total_ctu_pix_data)) * 100
data_per_parent['NONE'] = 100 - np.sum(data_per_parent.values())
return data_per_parent
def _verify_pu_sizes(self, sum_ctu, pu_sizes):
temp1 = np.sum([p[0]*p[1] for p in pu_sizes])
if (temp1 != sum_ctu): sys.exit("Error _verify_pu_sizes")
def _get_PU_level_deps(self, pu_type):
if (pu_type=="Ipu"):
dep = {'fwd': [], 'bwd':[]}
elif (pu_type=="Ppu"):
dep = {'fwd': [self.frame_dependencies[0], self.dependencies[0]], 'bwd': []}
elif (pu_type=="Bpu"):
random_ixs = np.random.randint(len(self.frame_dependencies), size=2)
dep = {'fwd': [self.frame_dependencies[random_ixs[0]], self.dependencies[random_ixs[0]]],
'bwd': [self.frame_dependencies[random_ixs[1]], self.dependencies[random_ixs[1]]]
}
return dep
def calc_num_CTU_theoretical(self):
max_num_CTU_frame = int(float(self.frame_h*self.frame_w)/float(SimParams.HEVC_CTU_SIZE))
max_num_CTU_tile = int(float(max_num_CTU_frame)/float(self.number_of_tiles))
return (max_num_CTU_frame, max_num_CTU_tile)
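    # Illustrative example (hypothetical numbers, assuming HEVC_CTU_SIZE is the
    # CTU pixel count, e.g. 64*64 = 4096): a 1280x720 frame gives
    # int(921600/4096) = 225 CTUs per frame, and with 4 tiles
    # int(225/4) = 56 CTUs per tile.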
def calc_num_CTU_via_block_partitions(self):
num_ctus_count = np.sum([
len(v['ctus'].keys()) for k,v in self.frame_block_partitions.iteritems()
])
return num_ctus_count
######################
## helper functions ##
######################
# not used
def _generate_Iframe_ComputationTime(self):return 0.0
def _generate_PFrame_ComputationTime(self):return 0.0
def _generate_BFrame_ComputationTime(self):return 0.0
@staticmethod
def getStaticComputationCost(frame_h, frame_w, cpu_exec_speed_ratio):
sys.exit("Error: HEVCFrameTask::getStaticComputationCost:: not implemented yet !")
##########################################
## generate the worst-case execution times
## for I/P/B frames
##########################################
def gen_Iframe_wcc(self):
ct=0.0; bl_8x8 = (8*8)
num_8x8_blks = ((self.frame_h*self.frame_w)/(bl_8x8))
ct = (num_8x8_blks * SimParams.HEVC_FIXED_BLOCK_WCCC['Ipu'][1]) * SimParams.CPU_EXEC_SPEED_RATIO
return ct
def gen_Pframe_wcc(self):
ct=0.0; bl_8x8 = (8*8)
num_8x8_blks = ((self.frame_h*self.frame_w)/(bl_8x8))
ct = (num_8x8_blks * SimParams.HEVC_FIXED_BLOCK_WCCC['Ppu'][1]) * SimParams.CPU_EXEC_SPEED_RATIO
return ct
def gen_Bframe_wcc(self):
ct=0.0; bl_8x8 = (8*8)
num_8x8_blks = ((self.frame_h*self.frame_w)/(bl_8x8))
ct = (num_8x8_blks * SimParams.HEVC_FIXED_BLOCK_WCCC['Bpu'][1]) * SimParams.CPU_EXEC_SPEED_RATIO
return ct
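    # Worked example for the three wcc estimators above (hypothetical numbers):
    # a 1280x720 frame contains 921600/64 = 14400 8x8 blocks, so the worst-case
    # cost is 14400 * HEVC_FIXED_BLOCK_WCCC[<pu type>][1], scaled by
    # CPU_EXEC_SPEED_RATIO; only the per-block upper bound differs between
    # I, P and B frames.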
###############################################
## functions to assist schedulability analysis
###############################################
# assume gop structure is the IPBBPBBPBBBB format
def getCriticalPaths(self):
sys.exit("Error: HEVCFrameTask::getCriticalPaths:: not implemented yet !")
# assume gop structure is the IPBBPBBPBBBB format
# we take into account the edges from node-->MMC(dst_task_ix = -1)
# we take into account the edge from MMC (src_task_ix=-2)-->I_frame_task
def getCriticalPaths_withMMCDataRDWR(self):
sys.exit("Error: HEVCFrameTask::getCriticalPaths_withMMCDataRDWR:: not implemented yet !")
def getEstimatedRelativeDeadline_EQF(self, dispatch_time=None):
if (dispatch_time==None): ai = self.get_dispatchTime()
else: ai = dispatch_time
ci = self.get_worstCaseComputationCost()
De2e = self.get_end_to_end_deadline()
# sum of all frames in gop
sum_gop_ci = 0.0
for each_frame_type in self.get_gop_frame_dec_order_ftype():
if(each_frame_type == "I"):
sum_gop_ci += self.get_wccIFrame()
elif(each_frame_type == "P"):
sum_gop_ci += self.get_wccPFrame()
elif(each_frame_type == "B"):
sum_gop_ci += self.get_wccBFrame()
# sum of all frames in gop - starting from current frame
sum_gop_ci_m = 0.0
for each_frame_type in self.get_gop_frame_dec_order_ftype()[self.gop_decode_order_ix:]:
if(each_frame_type == "I"):
sum_gop_ci_m += self.get_wccIFrame()
elif(each_frame_type == "P"):
sum_gop_ci_m += self.get_wccPFrame()
elif(each_frame_type == "B"):
sum_gop_ci_m += self.get_wccBFrame()
di = ai + ci + ((De2e - ai - sum_gop_ci) * ( ci/(sum_gop_ci_m)) )
assert ((di - ai) > 0)
return (di - ai)
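    # Illustrative EQF calculation (hypothetical numbers): with ai=0, ci=2,
    # De2e=40, sum_gop_ci=20 and sum_gop_ci_m=10, the slack (40-0-20)=20 is
    # shared in proportion to ci/sum_gop_ci_m = 0.2, giving
    # di = 0 + 2 + 20*0.2 = 6, i.e. a relative deadline of 6.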
# total deadline is divided equally amongst subtasks
def getEstimatedRelativeDeadline_Div_x(self):
De2e = self.get_end_to_end_deadline()
n = float(len(self.gop_structure))
x=1.0
di = (De2e / (n*x))
return di
###############################################
## related with dependency based scheduling
## CTU-level deps
###############################################
def setCurrentlyProcessingUnitRef(self, unit_dict):
self.current_processing_unit = unit_dict
# called when the task is completed
def nullifyCurrentlyProcessingUnitRef(self):
self.current_processing_unit = {'slice_id' : None,
'ctu_id' : None,
'rem_cc' : None,
}
def getCurrentlyProcessingUnitRef(self):
return self.current_processing_unit
def getCurrentlyProcessingUnitRef_Label(self):
max_slice_id = np.max(self.frame_block_partitions.keys())
str = "{%d/%d, %d, %.15f}" % (self.current_processing_unit['slice_id'], max_slice_id,
self.current_processing_unit['ctu_id'],
self.current_processing_unit['rem_cc'],
)
return str
def getNextProcessingUnitRef(self):
current_slice_id = self.current_processing_unit['slice_id']
current_ctu_id = self.current_processing_unit['ctu_id']
max_slice_id = np.max(self.frame_block_partitions.keys())
max_ctuid_in_current_slice = np.max(self.frame_block_partitions[current_slice_id]['ctus'].keys())
## debug##
# pprint.pprint({
# 's_id': current_slice_id, 'ctu_id': current_ctu_id, 'max_s': max_slice_id, 'max_c': max_ctuid_in_current_slice,
# })
## debug##
# -- last ctu in slice
if (current_ctu_id >= max_ctuid_in_current_slice):
# -- last slice
if (current_slice_id >= max_slice_id):
return None
else: # roll over to next slice
current_slice_id +=1
next_slice_first_ctu_id = np.min(self.frame_block_partitions[current_slice_id]['ctus'].keys())
current_ctu_id = next_slice_first_ctu_id
# -- more ctus to process in slice
else:
current_ctu_id +=1
current_slice_id = current_slice_id
cc = self.frame_block_partitions[current_slice_id]['ctus'][current_ctu_id]["cc"]
assert(cc != 0), "Error : getNextProcessingUnitRef: - CTU size is zero"
# assign
result = {'slice_id':current_slice_id,
'ctu_id':current_ctu_id,
'rem_cc': cc
}
return result
def getRemainingCTU_ComputationCost(self):
return self.current_processing_unit['rem_cc']
def setRemainingCTU_ComputationCost(self, ctu_cc):
self.current_processing_unit['rem_cc'] = ctu_cc
def getCTU_ComputationCost(self, slice_id, ctu_id):
ctu_cc = self.frame_block_partitions[slice_id]['ctus'][ctu_id]["cc"]
return ctu_cc
# called when an interrupt occurs
def updateRemainingCTU_ComputationCost(self, time_elapsed):
self.current_processing_unit['rem_cc'] = self.current_processing_unit['rem_cc']-time_elapsed
return self.current_processing_unit['rem_cc']
    # have all the CTU deps been fulfilled?
def isCTUDepsComplete(self, slice_id, ctu_id):
# get target ctu
target_ctu_dep_ids = self.frame_block_partitions[slice_id]['ctus'][ctu_id]['deps']
if len(target_ctu_dep_ids) == 0:
return True
else:
return False
# each frame task has a dep (tagged with size in bytes) from a parent
# we reduce this required dep size
# if required dep size is zero from all parents, then task is ready to run
def clearFrameDeps_bySize(self, parent_task_id, dep_size_bytes):
sys.exit("Error: clearFrameDeps_bySize:: not implemented yet")
def clearFrameDeps(self, parent_task_id):
del self.outstanding_deps_parent_tids[parent_task_id]
###############################################
## related with Task splitting
###############################################
def setTileParentFrameId(self, pid):
self.tile_parent_frame_task_id = pid
def getTileParentFrameId(self):
return self.tile_parent_frame_task_id
# tile based splits
def getNumSubTasksTiles(self):
return len(self.frame_tile_partitions)
def setTileInitialProcessingUnitRef(self, tile_block_partitions):
min_slice_id = np.min(tile_block_partitions.keys())
min_ctu_id = np.min(tile_block_partitions[min_slice_id]['ctus'].keys())
self.current_processing_unit = {'slice_id':min_slice_id , 'ctu_id':min_ctu_id, 'rem_cc': tile_block_partitions[min_slice_id]['ctus'][min_ctu_id]["cc"]} # slice, ctu, rem_cc
# execution costs per tile
def getTileLevel_ExecutionCost(self):
tile_level_cc = {}
for each_tile_ix, each_tile in enumerate(self.frame_tile_partitions):
slice_ixs = each_tile['slice_ixs']
# whats the cc for each slice
tile_sum_cc = 0.0
for each_slice_ix in slice_ixs:
tile_sum_cc += np.sum([ctu_dict['cc'] for ctu_dict in self.frame_block_partitions[each_slice_ix]['ctus'].values()])
tile_level_cc[each_tile_ix] = tile_sum_cc
return tile_level_cc
# (1) tile WCET - as a proportion of the tile pixels
def getTileWCCC_viaFiarProportions(self, tile_h, tile_w, frame_h, frame_w, frame_wccc):
if len(self.frame_tile_partitions) == 1:
return frame_wccc
else:
tile_dim_proportion = float(tile_w*tile_h)/float(frame_w*frame_h)
tile_wcc = float(frame_wccc) * float(tile_dim_proportion)
assert (tile_wcc > 0) and (tile_wcc < frame_wccc), "getTileWCCC_viaFiarProportions:: Error: %f, %f, %d" %(tile_wcc, frame_wccc, self.number_of_tiles)
return tile_wcc
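    # Example of the proportional split above (hypothetical numbers): a
    # 640x360 tile inside a 1280x720 frame covers 1/4 of the pixels, so with
    # frame_wccc = 0.8 the tile worst case becomes 0.8 * 0.25 = 0.2.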
# (2) tile WCET - using worst-case values of PU cc
    def getTileWCCC_viaWCPUCC(self, tile_h, tile_w, frame_h, frame_w):
        wc_pu_cc = np.max(SimParams.HEVC_FIXED_BLOCK_WCCC.values()) * SimParams.CPU_EXEC_SPEED_RATIO
        if len(self.frame_tile_partitions) == 1:
            # a single tile spans the whole frame, so the worst case is taken
            # over the full frame dimensions
            tile_wcc = (float(frame_w*frame_h) / float(8*8)) * wc_pu_cc
        else:
            tile_wcc = (float(tile_w*tile_h) / float(8*8)) * wc_pu_cc
        return tile_wcc
    # (3) tile WCET - from an observed worst-case distribution
    def setTileWCCC_viaDistribution(self, tile_wccc):
        self.tile_observed_wccc = tile_wccc
def getTileWCCC_viaDistribution(self):
return self.tile_observed_wccc
# relative deadline of the tile as a proportion of the relative deadline of the frame task
def getTileEstimatedRelativeDeadline_viaFairProportions(self, tile_h, tile_w, frame_h, frame_w, frame_relD):
if len(self.frame_tile_partitions) == 1:
return frame_relD
else:
tile_dim_proportion = float(tile_w*tile_h)/float(frame_w*frame_h)
tile_relD = frame_relD * float(tile_dim_proportion)
assert (tile_relD > 0) and (tile_relD < frame_relD), "getTileEstimatedRelativeDeadline_viaFairProportions:: Error: %f, %f" %(tile_relD, frame_relD)
return tile_relD
# block partitions - gives us the CTU level info for each tile
def getTileLevel_block_partitions(self):
if (SimParams.HEVC_MODEL_FLUSH_FRAMEBLOCK_INFO == False):
tile_level_block_partitions = {}
for each_tile_ix, each_tile in enumerate(self.frame_tile_partitions):
tile_level_block_partitions[each_tile_ix] = {}
slice_ixs = each_tile['slice_ixs']
new_sl_id = 0
for each_slice_ix in slice_ixs:
tile_level_block_partitions[each_tile_ix][new_sl_id] = self.frame_block_partitions[each_slice_ix]
new_sl_id+=1
return tile_level_block_partitions
else:
return self.hack_abstract__getTileLevel_block_partitions()
def getNumCTUinTile(self, tile_ix):
if (SimParams.HEVC_MODEL_FLUSH_FRAMEBLOCK_INFO == False):
tile_level_block_partitions = self.getTileLevel_block_partitions()
num_ctus_in_tile = np.sum([len(s['ctus'].keys())
for s_ix, s in tile_level_block_partitions[tile_ix].iteritems()])
return num_ctus_in_tile
else:
return self.hack_abstract__num_ctus_in_tile[tile_ix]
# not sure if this is important - so very lame solution
# num_ctus : number of ctus in the tile
def getTileDimensions(self, num_ctus):
if ((num_ctus % 6) == 0):
numctu_h = 6
numctu_w = (num_ctus/6)
elif ((num_ctus % 4) == 0):
numctu_h = 4
numctu_w = num_ctus/4
elif((num_ctus % 2) == 0):
numctu_h = 2
numctu_w = num_ctus/2
else:
numctu_h=1
numctu_w = num_ctus
assert (((numctu_w*SimParams.HEVC_CTU_WIDTH)*(numctu_h*SimParams.HEVC_CTU_WIDTH)) == (num_ctus*SimParams.HEVC_CTU_SIZE)), \
"%d, %d" % (numctu_w*SimParams.HEVC_CTU_WIDTH, numctu_h*SimParams.HEVC_CTU_WIDTH)
return (numctu_w*SimParams.HEVC_CTU_WIDTH, numctu_h*SimParams.HEVC_CTU_WIDTH)
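    # Example of the factorisation above: num_ctus = 12 is divisible by 6, so
    # the tile is laid out as 6 CTUs high by 2 CTUs wide, i.e.
    # (2*HEVC_CTU_WIDTH, 6*HEVC_CTU_WIDTH) pixels; the assert only checks that
    # the chosen rectangle holds exactly num_ctus CTUs.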
# find the dependencies per tile
# gop_tile_partitions = {
# 'orig_gop_struct' => original gop structure
# 'orig_task_ids' => original task ids
# 'new_tasks' => old_task_id : <new task ids (tile level)>
# e.g : 120 : [120_0, 120_1, 120_2]
#
# def getTileLevel_Dependencies_RandomPartitionData_FromParents(self, gop_tile_partitions):
# ### expected data from parents
# new_struct_expected_data_from_parents = {}
# for each_parent_task_id , data_size in self.get_expected_data_from_parents().iteritems():
# new_tile_level_parent_ids = gop_tile_partitions['new_tasks'][each_parent_task_id]
# # if there are more parents now, randomly split the incoming edges
# if (len(new_tile_level_parent_ids)>1):
# temp_data_partitions = self.constrained_sum_sample_pos(len(new_tile_level_parent_ids), data_size)
# for each_new_parent_task_id, new_data_from_parent_bytes in zip(new_tile_level_parent_ids, temp_data_partitions):
# new_struct_expected_data_from_parents[each_new_parent_task_id] = new_data_from_parent_bytes
# else:
# new_struct_expected_data_from_parents[each_parent_task_id] = data_size
#
# return new_struct_expected_data_from_parents
#
# def getTileLevel_Dependencies_RandomPartitionData_ToChildren(self, gop_tile_partitions):
# ### expected data to children
# new_struct_expected_data_to_children = {}
# for each_child_task_id, data_size in self.get_expected_data_to_children().iteritems():
# new_tile_level_child_ids = gop_tile_partitions['new_tasks'][each_child_task_id]
# # if there are more children now, randomly split the outgoing edges
# if (len(new_tile_level_child_ids)>1):
# temp_data_partitions = self.constrained_sum_sample_pos(len(new_tile_level_child_ids), data_size)
# for each_new_child_task_id , new_data_to_children_bytes in zip(new_tile_level_child_ids, temp_data_partitions):
# new_struct_expected_data_to_children[each_new_child_task_id] = new_data_to_children_bytes
# else:
# new_struct_expected_data_to_children[each_child_task_id] = data_size
#
# return new_struct_expected_data_to_children
def getTileLevel_MpegTaskSize(self, parent_mpged_task_size, tile_w, tile_h, frame_w, frame_h):
if len(self.frame_tile_partitions) == 1:
return float(parent_mpged_task_size)
else:
return np.rint(float(parent_mpged_task_size) * float(tile_w*tile_h)/float(frame_w*frame_h))
## this is a hack to reduce the memory footprint of the HEVCFrame object ##
# the block-level cc info is taking up alot of memory per object
# we are going to replace all the ctus with a single ctu
# so these functions need to be called AFTER the whole frame and GOP has been constructed
def hack_abstract__frame_block_partitions(self):
if SimParams.HEVC_MODEL_FLUSH_FRAMEBLOCK_INFO == False:
sys.exit("Error : hack__abstract_frame_block_partitions - HEVC_MODEL_FLUSH_FRAMEBLOCK_INFO = True")
# set the tile-level new single ctus
self.hack_abstract__calculateTileLevel_block_partitions()
# reset the frame block partitions
sum_ctu_cc = self.calc_FrameComputationTime()
# template format of dict : self.frame_block_partitions[current_slice_id]['ctus'][current_ctu_id]["cc"]
self.frame_block_partitions = {}
self.frame_block_partitions = {
0 : {
'ctus' : { 0 : { 'cc' : sum_ctu_cc} },
'sltype' : None
}
}
# reset all methods and member to be compatible
self.current_processing_unit = {'slice_id':0 , 'ctu_id':0, 'rem_cc': self.frame_block_partitions[0]['ctus'][0]["cc"]} # slice, ctu, rem_cc
def hack_abstract__calculateTileLevel_block_partitions(self):
tile_level_block_partitions = {}
for each_tile_ix, each_tile in enumerate(self.frame_tile_partitions):
tile_level_block_partitions[each_tile_ix] = {}
slice_ixs = each_tile['slice_ixs']
new_sl_id = 0
for each_slice_ix in slice_ixs:
tile_level_block_partitions[each_tile_ix][new_sl_id] = self.frame_block_partitions[each_slice_ix]
new_sl_id+=1
# calculate the total cc per tile
sum_tile_cc = 0.0
for each_slice_v in tile_level_block_partitions[each_tile_ix].values():
sum_tile_cc += np.sum([each_ctu_v['cc'] for each_ctu_v in each_slice_v['ctus'].values()])
# populate hack structure
self.hack_abstract__tile_level_block_partitions[each_tile_ix] = {
0 : {
'ctus' : { 0 : { 'cc' : sum_tile_cc} },
'sltype' : None
}
}
# calc number of ctus in tile
num_ctus_in_tile = np.sum([len(s['ctus'].keys())
for s_ix, s in tile_level_block_partitions[each_tile_ix].iteritems()])
self.hack_abstract__num_ctus_in_tile[each_tile_ix] = num_ctus_in_tile
# verify
total_frame_cc = self.calc_FrameComputationTime()
sum_alltiles_cc = 0.0
num_tiles=0
for each_tile in self.hack_abstract__tile_level_block_partitions.values():
for each_slice_v in each_tile.values():
sum_alltiles_cc += np.sum([each_ctu_v['cc'] for each_ctu_v in each_slice_v['ctus'].values()])
num_tiles+=1
assert(total_frame_cc == sum_alltiles_cc), "%d, %d, %d, %d" % (total_frame_cc, sum_alltiles_cc, num_tiles, self.getNumSubTasksTiles())
def hack_abstract__getTileLevel_block_partitions(self):
return self.hack_abstract__tile_level_block_partitions
######################
## data loading ####
######################
def load_frame_data(self, wf_id, strm_id, ugid, frame_ix, rand_seed):
file_info = {
'wf_id' : wf_id,
'strm_id' : strm_id,
'ugid' : ugid,
'frame_ix' : frame_ix,
'rand_seed' : rand_seed,
}
data_obj = DataPreloader.load_frame_data_file(file_info)
return data_obj
######################
## misc function ####
######################
@staticmethod
def constrained_sum_sample_pos(n, total):
"""Return a randomly chosen list of n positive integers summing to total.
Each such list is equally likely to occur."""
# source : http://stackoverflow.com/questions/3589214/generate-multiple-random-numbers-to-equal-a-value-in-python
dividers = sorted(random.sample(xrange(1, total), n - 1))
return [a - b for a, b in zip(dividers + [total], [0] + dividers)]
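    # Illustrative call (output is random): constrained_sum_sample_pos(3, 10)
    # might return e.g. [2, 5, 3]; the n values are always >= 1 and always sum
    # to the requested total, because the sorted dividers partition range(1, total).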
@staticmethod
def _generate_list_of_random_nums(N, param_sum):
result = [0]*N
ix = 0
while(np.sum(result) < param_sum):
if(np.random.rand()>0.5):
result[ix]+=1
ix+=1
if ix==N:
ix=0
final_sum = np.sum(result)
assert(final_sum == param_sum), "calc_sum=%d" % final_sum
        # tricky when there are zeros
        # find list of ixs where the value is zero
        zero_ixs = [ix for ix, v in enumerate(result) if v == 0]
if(len(zero_ixs)>0):
max_val_ix = [i for i, j in enumerate(result) if j == max(result)]
for each_zero_ix in zero_ixs:
result[each_zero_ix] +=1
result[random.choice(max_val_ix)] -= 1
return result
def _weightedChoice(self, weights, objects):
#http://stackoverflow.com/questions/10803135/weighted-choice-short-and-simple
"""Return a random item from objects, with the weighting defined by weights
(which must sum to 1)."""
cs = np.cumsum(weights) #An array of the weights, cumulatively summed.
idx = np.sum(cs < np.random.rand()) #Find the index of the first weight over a random value.
return objects[idx]
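    # Illustrative call (output is random): self._weightedChoice([0.1, 0.9], ['a', 'b'])
    # returns 'b' about 90% of the time; np.cumsum turns the weights into
    # cumulative thresholds, and the first threshold above a uniform draw
    # selects the item.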
class HEVCProcessingStatus:
HEVC_TPROCSTAT_CTU_LEVEL_INIT = 0
HEVC_TPROCSTAT_CTU_LEVEL_WAITING_FOR_DEPS = 1
HEVC_TPROCSTAT_CTU_LEVEL_PROCESSING = 2
HEVC_TPROCSTAT_CTU_LEVEL_COMPLETED = 3
class modified_expweib_gen(ss.rv_continuous):
#def _argcheck(self, skew):
# return np.isfinite(skew) #I guess we can confine it to finite value
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
        cond = 1
        for arg in args:
            # every argument must be finite
            cond = np.logical_and(cond, np.isfinite(arg))
return cond
# def _pdf(self, x, skew):
# return 2 * ss.exponweib.pdf(x) * ss.norm.cdf(x * skew)
|
gpl-3.0
|
ThomasBrouwer/BNMTF
|
experiments/experiments_gdsc/model_selection/run_line_search_bnmf_Sanger.py
|
1
|
4519
|
"""
Run the line search for BNMF with the Exp priors on the Sanger dataset.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)
from BNMTF.code.models.bnmf_vb_optimised import bnmf_vb_optimised
from BNMTF.code.cross_validation.mask import compute_Ms, compute_folds
from BNMTF.code.cross_validation.line_search_bnmf import LineSearch
from BNMTF.data_drug_sensitivity.gdsc.load_data import load_gdsc
import matplotlib.pyplot as plt
##########
standardised = False #standardised Sanger or unstandardised
no_folds = 5
restarts = 5
iterations = 1000
I, J = 622,138
values_K = range(1,10+1)
alpha, beta = 1., 1.
lambdaU = 1./10.
lambdaV = 1./10.
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
initUV = 'random'
classifier = bnmf_vb_optimised
# Load in data
(_,X_min,M,_,_,_,_) = load_gdsc(standardised=standardised)
folds_test = compute_folds(I,J,no_folds,M)
folds_training = compute_Ms(folds_test)
(M_train,M_test) = (folds_training[0],folds_test[0])
# Run the line search
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
line_search = LineSearch(classifier,values_K,X_min,M,priors,initUV,iterations,restarts=restarts)
line_search.search()
# Plot the performances of all four metrics
metrics = ['loglikelihood', 'BIC', 'AIC', 'MSE']
for metric in metrics:
plt.figure()
plt.plot(values_K, line_search.all_values(metric), label=metric)
plt.legend(loc=3)
# Also print out all values in a dictionary
all_values = {}
for metric in metrics:
all_values[metric] = line_search.all_values(metric)
print "all_values = %s" % all_values
'''
{'MSE': [3.0272045267867274, 2.5912982754926035, 2.3421361670917507, 2.1573447121174634, 2.0038243331243812, 1.8784259845268056, 1.7778387476966746, 1.708180464480777, 1.6429264869512277, 1.5743544560258957, 1.510963466212312, 1.4556030378512141, 1.4067992849188964, 1.352139317817434, 1.3038794309418051, 1.265443621895456, 1.2243072291024002, 1.189433738928334, 1.1561686922108647, 1.1175032736906385, 1.099602385471328, 1.065927015135751, 1.0453144714407552, 1.0110553320228033, 0.9900637907148588, 0.9618433024411556, 0.9409466977604016, 0.9184613303413715, 0.8959982159749676, 0.8790987197787278, 0.8602390454615657, 0.8410291237019341, 0.8200472590231683, 0.8036247007036755, 0.7909117242781721], 'loglikelihood': [-138379.73196191844, -132933.00697960873, -129397.98415939865, -126530.57279572886, -123960.73414372109, -121717.61056828291, -119814.62465031014, -118439.74426996421, -117111.14221036198, -115654.9915051248, -114264.97943092373, -113001.03622466451, -111856.12078137076, -110526.01789023768, -109321.18170649008, -108334.38968303277, -107257.48599766825, -106317.5299180052, -105395.42369197553, -104299.4118339516, -103791.11489491147, -102798.67578921128, -102188.75536988786, -101136.18855606556, -100483.28212715489, -99600.8398849109, -98935.61539521479, -98205.48454652501, -97443.1671045164, -96885.48032223292, -96231.39129112643, -95585.43415547578, -94864.8409259289, -94256.10530471621, -93802.97167136439], 'AIC': [278281.4639238369, 268910.01395921747, 263361.9683187973, 259149.14559145772, 255531.46828744217, 252567.22113656581, 250283.2493006203, 249055.48853992842, 247920.28442072397, 246529.9830102496, 245271.95886184747, 244266.07244932902, 243498.24156274152, 242360.03578047536, 241472.36341298016, 241020.77936606554, 240388.9719953365, 240031.0598360104, 239708.84738395107, 239038.8236679032, 239544.22978982294, 239081.35157842256, 239383.51073977572, 238800.37711213113, 239016.56425430978, 238773.6797698218, 238965.23079042957, 239026.96909305002, 239024.3342090328, 239430.96064446584, 239644.78258225287, 239874.86831095157, 239955.6818518578, 240260.21060943243, 240875.94334272877], 'BIC': [285250.93444804713, 282848.955007638, 284270.3798914281, 287027.0276882988, 290378.82090849354, 294384.0442818274, 299069.54297009215, 304811.25273361057, 310645.5191386164, 316224.6882523523, 321936.1346281604, 327899.71873985225, 334101.358377475, 339932.6231194191, 346014.4212761342, 352532.30775342986, 358869.97090691107, 365481.5292717952, 372128.78734394617, 378428.23415210855, 385903.11079823854, 392409.70311104844, 399681.33279661194, 406067.66969317757, 413253.32735956647, 419979.9133992888, 427140.93494410685, 434172.1437709376, 441138.97941113054, 448515.0763707739, 455698.3688327712, 462897.92508568015, 469948.2091507967, 477222.20843258157, 484807.41169008816]}
'''
|
apache-2.0
|
Petr-By/qtpyvis
|
qtgui/widgets/matplotlib.py
|
1
|
4136
|
import sys
import time
import numpy as np
#from matplotlib.backends.qt_compat import QtCore, QtWidgets
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QVBoxLayout
class QMatplotlib(FigureCanvas):
"""A Qt widget that holds a MatPlotLib :py:class:`Figure`.
The figure can be accessed as property `figure`.
"""
def __init__(self, parent=None, figsize=(8, 3)):
super().__init__(Figure(figsize))
# How to accept keyboard focus:
# Qt.NoFocus: accept no focus at all [default]
        # Qt.TabFocus: focus by tabbing
# Qt.ClickFocus: focus by mouse clicking
# Qt.StrongFocus = both (Qt.TabFocus or Qt.ClickFocus)
self.setFocusPolicy(Qt.ClickFocus)
# Actively grab the focus
#self.setFocus()
# Matplotlib events:
# https://matplotlib.org/users/event_handling.html
cid_key = self.mpl_connect('key_press_event', self._onKeyPress)
cid_mouse = self.mpl_connect('button_press_event', self._onMousePress)
# Figure.subplots is a new feature in Matplotlib 2.1.
#self._ax = self.figure.subplots()
self._ax = self.figure.add_subplot(111)
#
# Place some default content
#
self.showMessage('matplotlib')
#t = np.linspace(0, 10, 501)
#self._ax.plot(t, np.tan(t), ".")
#layout = QVBoxLayout(self)
#
#static_canvas = FigureCanvas(Figure(figsize=(5, 3)))
#layout.addWidget(static_canvas)
#self.addToolBar(NavigationToolbar(static_canvas, self))
#
#dynamic_canvas = FigureCanvas(Figure(figsize=(5, 3)))
#layout.addWidget(dynamic_canvas)
#self.addToolBar(QtCore.Qt.BottomToolBarArea,
# NavigationToolbar(dynamic_canvas, self))
# ...
#self._dynamic_ax = dynamic_canvas.figure.subplots()
#self._timer = dynamic_canvas.new_timer(
# 100, [(self._update_canvas, (), {})])
#self._timer.start()
def __enter__(self):
self._ax.clear()
return self._ax
def __exit__(self, type, value, traceback):
self._ax.figure.canvas.draw()
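    # Illustrative usage of the context-manager protocol above (not from the
    # original source):
    #
    #     with widget as ax:      # clears the axes on entry
    #         ax.plot([1, 2, 3])  # draw with the plain Matplotlib Axes API
    #     # the canvas is redrawn automatically on exit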
def _onKeyPress(self, event):
print(f"Matplotlib: you pressed '{event.key}'")
def _onMousePress(self, event):
click = 'double' if event.dblclick else 'single'
button = event.button
pixel_x = event.x
pixel_y = event.y
data_x = event.xdata
data_y = event.ydata
print(f"Matplotlib: {click} click with button {button}"
f" x={pixel_x}, y={pixel_y}," +
("None" if data_x is None else f"data=({data_x},{data_y})"))
def _update_canvas(self):
# FIXME[old]
self._ax.clear()
t = np.linspace(0, 10, 101)
# Shift the sinusoid as a function of time.
self._ax.plot(t, np.sin(t + time.time()))
self._ax.figure.canvas.draw()
def scatter(self, *args, **kwargs):
with self as ax:
ax.scatter(*args, **kwargs)
def imshow(self, *args, **kwargs):
with self as ax:
ax.imshow(*args, **kwargs)
def plot(self, *args, **kwargs):
with self as ax:
ax.plot(*args, **kwargs)
def showMessage(self, message: str):
with self as ax:
ax.axis('off')
ax.text(0.5, 0.5, message,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
def noData(self):
self.showMessage("No data")
def save(self, filename: str=None) -> None:
pass
#FIXME[old]:
# model_name="vae_mnist"
# filename = os.path.join(model_name, "digits_over_latent.png")
# plt.savefig(filename)
# filename = os.path.join(model_name, "vae_mean.png")
# os.makedirs(model_name, exist_ok=True)
# plt.savefig(filename)
# plt.show()
|
mit
|
heli522/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
176
|
12155
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
|
bsd-3-clause
|
tardis-sn/tardis
|
tardis/conftest.py
|
1
|
5024
|
"""Configure Test Suite.
This file is used to configure the behavior of pytest when using the Astropy
test infrastructure. It needs to live inside the package in order for it to
get picked up when running the tests inside an interpreter using
packagename.test
"""
import os
from astropy.version import version as astropy_version
# For Astropy 3.0 and later, we can use the standalone pytest plugin
if astropy_version < "3.0":
from astropy.tests.pytest_plugins import * # noqa
del pytest_report_header
ASTROPY_HEADER = True
else:
try:
from pytest_astropy_header.display import (
PYTEST_HEADER_MODULES,
TESTED_VERSIONS,
)
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
"""Configure Pytest with Astropy.
Parameters
----------
config : pytest configuration
"""
if ASTROPY_HEADER:
config.option.astropy_header = True
# Customize the following lines to add/remove entries from the list of
# packages for which version numbers are displayed when running the tests.
PYTEST_HEADER_MODULES.pop("Pandas", None)
PYTEST_HEADER_MODULES["scikit-image"] = "skimage"
from . import __version__
packagename = os.path.basename(os.path.dirname(__file__))
TESTED_VERSIONS[packagename] = __version__
# Uncomment the last two lines in this block to treat all DeprecationWarnings as
# exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
# as follows (although the defaults should work for most cases).
# To ignore some packages that produce deprecation warnings on import
# (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
# 'setuptools'), add:
# modules_to_ignore_on_import=['module_1', 'module_2']
# To ignore some specific deprecation warning messages for Python version
# MAJOR.MINOR or later, add:
# warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# from astropy.tests.helper import enable_deprecations_as_exceptions # noqa
# enable_deprecations_as_exceptions()
# -------------------------------------------------------------------------
# Here the TARDIS testing stuff begins
# -------------------------------------------------------------------------
import pytest
import pandas as pd
from tardis.io.util import yaml_load_config_file
from tardis.io.config_reader import Configuration
from tardis.simulation import Simulation
def pytest_addoption(parser):
parser.addoption(
"--tardis-refdata", default=None, help="Path to Tardis Reference Folder"
)
parser.addoption(
"--integration-tests",
dest="integration-tests",
default=None,
help="path to configuration file for integration tests",
)
parser.addoption(
"--generate-reference",
action="store_true",
default=False,
help="generate reference data instead of testing",
)
parser.addoption(
"--less-packets",
action="store_true",
default=False,
help="Run integration tests with less packets.",
)
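# Illustrative invocation of the options above (hypothetical paths):
#
#   pytest --tardis-refdata=/path/to/tardis-refdata --generate-reference tardis
#
# --tardis-refdata points at the reference-data checkout, and
# --generate-reference switches the reference fixtures into write mode.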
# -------------------------------------------------------------------------
# project specific fixtures
# -------------------------------------------------------------------------
@pytest.fixture(scope="session")
def generate_reference(request):
option = request.config.getoption("--generate-reference")
if option is None:
return False
else:
return option
@pytest.fixture(scope="session")
def tardis_ref_path(request):
tardis_ref_path = request.config.getoption("--tardis-refdata")
if tardis_ref_path is None:
pytest.skip("--tardis-refdata was not specified")
else:
return os.path.expandvars(os.path.expanduser(tardis_ref_path))
from tardis.tests.fixtures.atom_data import *
@pytest.yield_fixture(scope="session")
def tardis_ref_data(tardis_ref_path, generate_reference):
if generate_reference:
mode = "w"
else:
mode = "r"
with pd.HDFStore(
os.path.join(tardis_ref_path, "unit_test_data.h5"), mode=mode
) as store:
yield store
@pytest.fixture
def tardis_config_verysimple():
return yaml_load_config_file(
"tardis/io/tests/data/tardis_configv1_verysimple.yml"
)
###
# HDF Fixtures
###
@pytest.fixture(scope="session")
def hdf_file_path(tmpdir_factory):
path = tmpdir_factory.mktemp("hdf_buffer").join("test.hdf")
return str(path)
@pytest.fixture(scope="session")
def config_verysimple():
filename = "tardis_configv1_verysimple.yml"
path = os.path.abspath(os.path.join("tardis/io/tests/data/", filename))
config = Configuration.from_yaml(path)
return config
@pytest.fixture(scope="session")
def simulation_verysimple(config_verysimple, atomic_dataset):
atomic_data = deepcopy(atomic_dataset)
sim = Simulation.from_config(config_verysimple, atom_data=atomic_data)
sim.iterate(4000)
return sim
|
bsd-3-clause
|
evidation-health/bokeh
|
bokeh/charts/builder/line_builder.py
|
4
|
7285
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Line class which lets you build your Line charts just
passing the arguments to the Chart class and calling the proper functions.
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from six import iteritems
from itertools import chain
from .._builder import XYBuilder, create_and_build
from ..glyphs import LineGlyph
from .._attributes import DashAttr, ColorAttr
from ...models.sources import ColumnDataSource
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def Line(data=None, x=None, y=None, **kws):
""" Create a line chart using :class:`LineBuilder <bokeh.charts.builder.line_builder.LineBuilder>` to
render the geometry from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
        index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as a **1d iterable** of any sort that will be used as
            the series' common index, or a **string** that corresponds to the key of the
            mapping to be used as the index (and not as a data series) if
            area.values is a mapping (like a dict, an OrderedDict
            or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.charts import Line, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = np.array([[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]])
line = Line(xyvalues, title="line", legend="top_left", ylabel='Languages')
output_file('line.html')
show(line)
"""
kws['x'] = x
kws['y'] = y
return create_and_build(LineBuilder, data, **kws)
class LineBuilder(XYBuilder):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
default_attributes = {'color': ColorAttr(),
'dash': DashAttr()}
dimensions = ['y', 'x']
@property
def measures(self):
if isinstance(self.y.selection, list):
return self.y.selection
elif isinstance(self.x.selection, list):
return self.x.selection
else:
return None
@property
def measure_input(self):
return isinstance(self.y.selection, list) or isinstance(self.x.selection, list)
def _setup(self):
"""Handle input options that require transforming data and/or user selections."""
# handle special case of inputs as measures
if self.measure_input:
# Check if we stack measurements and by which attributes
stack_flags = {'color': self.attr_measurement('color'),
'dash': self.attr_measurement('dash')}
# collect the other columns used as identifiers, that aren't a measurement name
id_cols = [self.attributes[attr].columns
for attr, stack in iteritems(stack_flags) if not stack and
self.attributes[attr].columns != self.measures and
self.attributes[attr].columns is not None]
id_cols = list(chain.from_iterable(id_cols))
# if we have measures input, we need to stack by something, set default
if all(attr is False for attr in list(stack_flags.values())):
stack_flags['color'] = True
# stack the measurement dimension while keeping id columns
self._stack_measures(ids=id_cols)
# set the attributes to key off of the name of the stacked measurement, if stacked
if stack_flags['color']:
# color by the name of each variable
self.attributes['color'] = ColorAttr(columns='variable',
data=ColumnDataSource(self._data.df))
if stack_flags['dash']:
# dash by the name of each variable
self.attributes['dash'] = DashAttr(columns='variable',
data=ColumnDataSource(self._data.df))
# Handle when to use special column names
if self.x.selection is None and self.y.selection is not None:
self.x.selection = 'index'
elif self.x.selection is not None and self.y.selection is None:
self.y.selection = 'index'
def attr_measurement(self, attr_name):
"""Detect if the attribute has been given measurement columns."""
cols = self.attributes[attr_name].columns
return (cols is not None and (cols == self.y.selection or
cols == self.x.selection))
def _stack_measures(self, ids):
"""Transform data so that id columns are kept and measures are stacked in single column."""
if isinstance(self.y.selection, list):
dim = 'y'
if self.x.selection is not None:
ids.append(self.x.selection)
else:
dim = 'x'
if self.y.selection is not None:
ids.append(self.y.selection)
if len(ids) == 0:
ids = None
dim_prop = getattr(self, dim)
# transform our data by stacking the measurements into one column
self._data.stack_measures(measures=dim_prop.selection, ids=ids)
# update our dimension with the updated data
dim_prop.set_data(self._data)
def _yield_renderers(self):
for group in self._data.groupby(**self.attributes):
glyph = LineGlyph(x=group.get_values(self.x.selection),
y=group.get_values(self.y.selection),
line_color=group['color'],
dash=group['dash'])
# save reference to composite glyph
self.add_glyph(group, glyph)
# yield each renderer produced by composite glyph
for renderer in glyph.renderers:
yield renderer
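# A hedged, illustrative sketch (not part of the original bokeh module): the
# _stack_measures method above reshapes wide measurement columns into a single
# 'value' column keyed by a 'variable' column while keeping the id columns.
# The sketch below shows that wide-to-long reshape with pandas only; the
# column names are hypothetical example data, not Bokeh internals.
def _stack_measures_sketch():
    import pandas as pd
    wide = pd.DataFrame({'month': [1, 2, 3],
                         'python': [10, 12, 15],
                         'pypy': [8, 9, 11]})
    # keep 'month' as the id column and stack the two measure columns
    long_form = pd.melt(wide, id_vars=['month'], value_vars=['python', 'pypy'])
    return long_form  # columns: month, variable, value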
|
bsd-3-clause
|
EnvGen/toolbox
|
scripts/concoct/correctly_placed_16S.py
|
1
|
5131
|
import pandas as pd
import argparse
from collections import defaultdict
import os
import math
def fix_strange_metaxa_vals(vals):
for i, val in enumerate(vals):
if i == 2 and val == 'Flavobacteria':
vals[2] = 'Flavobacteriia'
if i == 3 and val == 'Micrococcales':
vals[3] = "Actinomycetales"
return vals
def main(args):
clustering = pd.read_table(args.clustering_file, sep=',', names=['contig_id', 'cluster_id'], index_col=0)
taxonomy_df = pd.read_table(args.taxonomy_file, header=None, index_col=0, names=["contig_id", "taxonomy", "bla", "bla1", "bla2"])
all_approved = pd.read_table(args.all_approved_file, header=None, names=["contig_id"], index_col=0)
checkm_taxonomy = pd.read_table(args.checkm_taxonomy_file, index_col=0)
all_approved_set = set(all_approved.index.values)
unapproved_rrna = defaultdict(int)
approved_rrna = {}
levels = ['kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
taxonomy_df['taxonomy'].fillna('', inplace=True)
for rrna_contig in taxonomy_df.index.values:
if rrna_contig in clustering.index:
cluster_id = clustering.loc[rrna_contig]['cluster_id']
metaxa_val = taxonomy_df.loc[rrna_contig]['taxonomy'].split(';')
metaxa_has_val = metaxa_val != ['']
if cluster_id in all_approved_set and metaxa_has_val:
checkm_val = checkm_taxonomy.loc[cluster_id]['Taxonomy'].split(';')
metaxa_val = fix_strange_metaxa_vals(metaxa_val)
matched_level = None
for i, level in enumerate(levels):
checkm_level_val, metaxa_level_val = None, None
if len(checkm_val) > i and len(metaxa_val) > i:
checkm_level_val = checkm_val[i][3:]
metaxa_level_val = metaxa_val[i]
if level == 'species':
metaxa_level_val = metaxa_val[i].replace(' ', '_')
if checkm_level_val == metaxa_level_val:
matched_level = i
else:
break
else:
matched_level = i-1
break
if cluster_id not in approved_rrna:
approved_rrna[cluster_id] = {'matching': 0, 'not matching': 0}
                if matched_level is not None and matched_level >= args.level:
approved_rrna[cluster_id]['matching'] += 1
else:
approved_rrna[cluster_id]['not matching'] += 1
#print(most_detailed_level_checkm, most_detailed_level_metaxa)
#print(most_detailed_matched_level)
#print(taxonomy_df.loc[rrna_contig]['taxonomy'], checkm_taxonomy.loc[cluster_id]['Taxonomy'])
else:
unapproved_rrna[cluster_id] += 1
for cluster_id in all_approved_set:
if cluster_id not in approved_rrna:
approved_rrna[cluster_id] = {'matching': 0, 'not matching': 0}
approved_stats_df = pd.DataFrame.from_dict(approved_rrna, orient='index')
unapproved_stats_df = pd.DataFrame.from_dict(unapproved_rrna, orient='index')
unapproved_stats_df.columns = ['nr_rrna']
with open(os.path.join(args.outdir, 'stats_per_approved.tsv'), 'w') as ofh:
approved_stats_df.to_csv(ofh, sep='\t')
with open(os.path.join(args.outdir, 'stats_per_unapproved.tsv'), 'w') as ofh:
unapproved_stats_df.to_csv(ofh, sep='\t')
with open(os.path.join(args.outdir, 'summary_nr_matches.tsv'), 'w') as ofh:
print(len(approved_stats_df[approved_stats_df['matching'] != 0]), len(approved_stats_df), file=ofh)
with open(os.path.join(args.outdir, 'summary_nr_mismatches.tsv'), 'w') as ofh:
print(len(approved_stats_df[approved_stats_df['not matching'] != 0]), len(approved_stats_df), file=ofh)
    # Things to output:
    #
    # Number of approved genomes with matching rRNA
    # Number of approved genomes with mismatching rRNA
    # Number of rRNA genes per bin
    #
    # Number of approved genomes with > 0 matching rRNA and < 2 mismatching rRNA
    # Matching is counted at the order level
    #
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--clustering_file', help="e.g. ../../Data/test_binning_and_16s_combo/clustering_nocutup.csv")
parser.add_argument('--taxonomy_file', help="e.g. ../../Data/test_binning_and_16s_combo/all_contigs.taxonomy.txt")
parser.add_argument('--all_approved_file', help="e.g. ../../Data/test_binning_and_16s_combo/list_of_all_approved_bins_nocutup.tsv")
parser.add_argument('--checkm_taxonomy_file', help="e.g. ../../Data/test_binning_and_16s_combo/checkm_tree_qa.tsv")
parser.add_argument('--level', type=int, help='Taxonomic level to run comparison on kingdom: 0, phylum: 1, class: 2, order: 3, family: 4, genus: 5, species: 6')
parser.add_argument('--outdir', help="A directory for output files")
args = parser.parse_args()
main(args)
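# A hedged usage sketch (comments only, never executed): the flags mirror the
# argparse definitions above and the file names are placeholders rather than
# real data; --level 3 compares lineages at the order level, as noted above.
#
#   python correctly_placed_16S.py \
#       --clustering_file clustering_nocutup.csv \
#       --taxonomy_file all_contigs.taxonomy.txt \
#       --all_approved_file list_of_all_approved_bins_nocutup.tsv \
#       --checkm_taxonomy_file checkm_tree_qa.tsv \
#       --level 3 \
#       --outdir results/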
|
mit
|
mjudsp/Tsallis
|
examples/neural_networks/plot_mlp_training_curves.py
|
56
|
3596
|
"""
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
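# A hedged, illustrative add-on (not part of the original example); it mirrors
# the constructor arguments used in params above and is defined but never
# called, so the plotted figure is unchanged.
def _loss_curve_sketch():
    # Each fitted MLPClassifier records one training-loss value per iteration
    # in loss_curve_, which is what the plotting loop above draws; its length
    # is capped by the max_iter used for that dataset.
    mlp = MLPClassifier(algorithm='sgd', learning_rate_init=0.2, max_iter=15,
                        random_state=0, verbose=0)
    mlp.fit(MinMaxScaler().fit_transform(iris.data), iris.target)
    assert len(mlp.loss_curve_) <= 15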
|
bsd-3-clause
|
droundy/deft
|
papers/pair-correlation/figs/plot-dadz-inverse-sixth.py
|
1
|
1962
|
#!/usr/bin/python
from __future__ import division
import matplotlib
import sys
import styles # for our style choices for these plots
if 'show' not in sys.argv:
matplotlib.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
if len(sys.argv) < 2:
print(("Usage: " + sys.argv[0] + " eta"))
exit(1)
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
eta = float(sys.argv[1])
#arg eta = [.2,.3]
able_to_read_file = True
plt.title('$da/dz,$ $\eta = %g,$ square well' %(eta))
data = loadtxt("figs/mc/a1/inverse-sixth-0.3-rmax-5.dat")
mc_z0, mc_da_dz = data[:, 0], data[:, 1]
# data = loadtxt("figs/walls/inverse-sixth-dadz-this-work-%04.2f-rmax-5.dat" % eta)
# tw_z0, tw_dadz = data[:,0], data[:,1]
data = loadtxt("figs/walls/inverse-sixth-dadz-this-work-short-%04.2f-rmax-5.dat" % eta)
tws_z0, tws_dadz = data[:, 0], data[:, 1]
# datamc = loadtxt("figs/walls/inverse-sixth-dadz-this-work-mc-%04.2f-rmax-5.dat" % eta)
# twmc_z0, twmc_dadz = datamc[:,0], datamc[:,1]
data = loadtxt("figs/walls/inverse-sixth-dadz-sokolowski-%04.2f-rmax-5.dat" % eta)
s_z0, s_dadz = data[:, 0], data[:, 1]
plt.figure(figsize=(6, 4))
plt.plot(mc_z0[::10], mc_da_dz[::10], styles.plot['mc'], label=styles.title['mc'])
# plt.plot(tw_z0, tw_dadz, styles.plot['this-work'], label=styles.title['this-work'])
plt.plot(tws_z0, tws_dadz, styles.plot['this-work-short'], label=styles.title['this-work-short'])
# plt.plot(twmc_z0, twmc_dadz, styles.plot['this-work-mc'], label=styles.title['this-work-mc'])
plt.plot(s_z0, s_dadz, styles.plot['sokolowski'], label=styles.title['sokolowski'])
plt.xlim([-.5, 6])
plt.legend(loc='best').draw_frame(False)
plt.xlabel('$z/R$')
plt.ylabel(r'$dF_1/dz$')
plt.yticks([])
#plt.xticks([0,1,2,3])
plt.title('$\Phi(r) = \Theta(2.5\sigma - r)/r^6$ $\eta = %g$' % eta)
plt.tight_layout()
savefig("figs/dadz-inverse-sixth-%d.pdf" % (int(eta*10)))
plt.show()
|
gpl-2.0
|
jlegendary/scikit-learn
|
examples/ensemble/plot_adaboost_regression.py
|
311
|
1529
|
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
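# A hedged, illustrative add-on (not part of the original example); it relies
# only on the regr_2 estimator fitted above and the public staged_predict
# method, and is defined but never called, so the plot above is unchanged.
def _staged_error_sketch():
    # AdaBoostRegressor.staged_predict() yields the ensemble prediction after
    # each boosting stage, so the docstring's claim that more boosts capture
    # more detail can be checked by plotting the training MSE per stage.
    stage_mse = [np.mean((y - y_stage) ** 2)
                 for y_stage in regr_2.staged_predict(X)]
    plt.figure()
    plt.plot(np.arange(1, len(stage_mse) + 1), stage_mse)
    plt.xlabel("number of boosting stages")
    plt.ylabel("training MSE")
    plt.show()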
|
bsd-3-clause
|
Winterflower/ibis
|
ibis/tests/test_comms.py
|
16
|
11505
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
import pytest
import numpy as np
from ibis.util import guid
from ibis.compat import unittest
try:
import ibis.comms as comms
from ibis.comms import (SharedMmap, IbisType, IbisTableReader,
IbisTableWriter)
SKIP_TESTS = False
except ImportError:
SKIP_TESTS = True
def _nuke(path):
try:
os.remove(path)
except os.error:
pass
pytestmark = pytest.mark.skipif(SKIP_TESTS,
reason='Comms extension disabled')
class TestIPCLock(unittest.TestCase):
def setUp(self):
if sys.platform == 'darwin':
raise unittest.SkipTest
self.timeout = 1
self.master = comms.IPCLock(is_slave=0, lock_timeout_ms=self.timeout)
self.slave = comms.IPCLock(self.master.semaphore_id,
lock_timeout_ms=self.timeout)
def test_acquire_and_release(self):
# It's not our turn
self.assertFalse(self.master.acquire(block=False))
self.slave.acquire()
self.slave.release()
self.assertTrue(self.master.acquire())
def test_cleanup_semaphore_arrays(self):
# Otherwise, there will be too many semaphore arrays floating around
for i in range(500):
comms.IPCLock(is_slave=0)
def test_thread_blocking(self):
lock = threading.Lock()
results = []
# This also verifies that the GIL is correctly dropped
def ping():
while True:
with self.slave:
with lock:
if len(results) == 4:
break
results.append('ping')
def pong():
while True:
with self.master:
with lock:
if len(results) == 4:
break
results.append('pong')
t1 = threading.Thread(target=pong)
t1.start()
t2 = threading.Thread(target=ping)
t2.start()
t1.join()
t2.join()
ex_results = ['ping', 'pong'] * 2
assert results == ex_results
class TestSharedMmap(unittest.TestCase):
def setUp(self):
self.to_nuke = []
def tearDown(self):
for path in self.to_nuke:
_nuke(path)
def test_create_file(self):
size = 1024
path = guid()
try:
mm = SharedMmap(path, size, create=True)
mm.close()
self.assertTrue(os.path.exists(path))
self.assertEqual(os.stat(path).st_size, size)
finally:
_nuke(path)
def test_file_not_exist(self):
path = guid()
self.assertRaises(IOError, SharedMmap, path, 1024)
self.assertRaises(IOError, SharedMmap, path, 1024, offset=20,
create=True)
def test_close_file(self):
path = guid()
self.to_nuke.append(path)
data = guid()
mm = SharedMmap(path, len(data), create=True)
assert mm.closed is False
mm.close()
assert mm.closed is True
# idempotent
mm.close()
assert mm.closed is True
self.assertRaises(IOError, mm.read, 4)
self.assertRaises(IOError, mm.write, 'bazqux')
self.assertRaises(IOError, mm.seek, 0)
self.assertRaises(IOError, mm.flush)
def test_file_interface(self):
path = guid()
self.to_nuke.append(path)
data = guid()
mm = SharedMmap(path, len(data), create=True)
assert mm.tell() == 0
mm.write(data)
assert mm.tell() == len(data)
mm.seek(0)
assert mm.tell() == 0
result = mm.read(16)
assert len(result) == 16
assert result == data[:16]
assert mm.tell() == 16
def test_multiple_mmaps(self):
path = guid()
path2 = guid()
data = guid()
self.to_nuke.extend([path, path2])
mm1 = SharedMmap(path, len(data), create=True)
mm1.write(data)
mm2 = SharedMmap(path, len(data))
result = mm2.read()
self.assertEqual(result, data)
# Open both maps first, see if data synchronizes
mm1 = SharedMmap(path2, len(data), create=True)
mm2 = SharedMmap(path2, len(data))
mm1.write(data)
result = mm2.read()
self.assertEqual(result, data)
def rand_bool(N):
return np.random.randint(0, 2, size=N).astype(np.uint8)
def rand_int_span(dtype, N):
info = np.iinfo(dtype)
lo, hi = info.min, info.max
return np.random.randint(lo, hi, size=N).astype(dtype)
def bool_ex(N):
mask = rand_bool(N)
values = rand_bool(N)
return _to_masked(values, mask, IbisType.BOOLEAN)
def int_ex(N, ibis_type):
mask = rand_bool(N)
nptype = comms._ibis_to_numpy[ibis_type]
values = rand_int_span(nptype, N)
return _to_masked(values, mask, ibis_type)
def double_ex(N):
mask = rand_bool(N)
values = np.random.randn(N)
return _to_masked(values, mask, IbisType.DOUBLE)
def _to_masked(values, mask, dtype):
return comms.masked_from_numpy(values, mask, dtype)
class TestImpalaMaskedFormat(unittest.TestCase):
"""
Check that data makes it to and from the file format, and that it can be
correctly transformed to the appropriate NumPy/pandas/etc. format
"""
N = 1000
def _check_roundtrip(self, columns):
writer = IbisTableWriter(columns)
table_size = writer.total_size()
buf = comms.RAMBuffer(table_size)
writer.write(buf)
buf.seek(0)
reader = IbisTableReader(buf)
for i, expected in enumerate(columns):
result = reader.get_column(i)
assert result.equals(expected)
def test_basic_diverse_table(self):
columns = [
bool_ex(self.N),
int_ex(self.N, IbisType.TINYINT),
int_ex(self.N, IbisType.SMALLINT),
int_ex(self.N, IbisType.INT),
int_ex(self.N, IbisType.BIGINT)
]
self._check_roundtrip(columns)
def test_boolean(self):
col = bool_ex(self.N)
self.assertEqual(col.nbytes(), self.N * 2)
self._check_roundtrip([col])
# Booleans with nulls will come out as object arrays with None for each
# null. This is how pandas handles things
result = col.to_numpy_for_pandas()
assert result.dtype == object
_check_masked_correct(col, result, np.bool_,
lambda x: x is None)
# No nulls, get boolean dtype
mask = np.zeros(self.N, dtype=np.uint8)
values = rand_bool(self.N)
col2 = _to_masked(values, mask, IbisType.BOOLEAN)
result2 = col2.to_numpy_for_pandas()
_check_masked_correct(col2, result2, np.bool_,
lambda x: x is None)
# Get a numpy.ma.MaskedArray
# masked_result = col.to_masked_array()
# didn't copy
# assert not masked_result.flags.owndata
# assert masked_result.base is col
# For each integer type, address conversion back to NumPy rep's: masked
# array, pandas-compatible (nulls force upcast to float + NaN for NULL)
def test_tinyint(self):
col = int_ex(self.N, IbisType.TINYINT)
self.assertEqual(col.nbytes(), self.N * 2)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int8)
_check_pandas_ints_no_nulls(self.N, IbisType.TINYINT)
def test_smallint(self):
col = int_ex(self.N, IbisType.SMALLINT)
self.assertEqual(col.nbytes(), self.N * 3)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int16)
_check_pandas_ints_no_nulls(self.N, IbisType.SMALLINT)
def test_int(self):
col = int_ex(self.N, IbisType.INT)
self.assertEqual(col.nbytes(), self.N * 5)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int32)
_check_pandas_ints_no_nulls(self.N, IbisType.INT)
def test_int_segfault(self):
col = int_ex(1000000, IbisType.INT)
col.to_numpy_for_pandas()
def test_bigint(self):
col = int_ex(self.N, IbisType.BIGINT)
self.assertEqual(col.nbytes(), self.N * 9)
self._check_roundtrip([col])
_check_pandas_ints_nulls(col, np.int64)
_check_pandas_ints_no_nulls(self.N, IbisType.BIGINT)
def test_float(self):
mask = rand_bool(self.N)
values = np.random.randn(self.N).astype(np.float32)
col = _to_masked(values, mask, IbisType.FLOAT)
self.assertEqual(col.nbytes(), self.N * 5)
self._check_roundtrip([col])
result = col.to_numpy_for_pandas()
assert result.dtype == np.float32
mask = np.isnan(result)
ex_mask = col.mask().view(np.bool_)
assert np.array_equal(mask, ex_mask)
def test_double(self):
col = double_ex(self.N)
self.assertEqual(col.nbytes(), self.N * 9)
self._check_roundtrip([col])
result = col.to_numpy_for_pandas()
assert result.dtype == np.float64
mask = np.isnan(result)
ex_mask = col.mask().view(np.bool_)
assert np.array_equal(mask, ex_mask)
def test_string_pyobject(self):
# pandas handles strings in object-type (NPY_OBJECT) arrays and uses
# either None or NaN for nulls. For the time being we'll be consistent
# with that
#
pass
def test_timestamp(self):
pass
def test_decimal(self):
pass
def test_multiple_string_columns(self):
# For the time being, string (STRING, VARCHAR, CHAR) columns will all
# share the same intern table
pass
def _check_pandas_ints_nulls(col, dtype):
result = col.to_numpy_for_pandas()
assert result.dtype == np.float64
_check_masked_correct(col, result, dtype, np.isnan)
def _check_pandas_ints_no_nulls(N, ibis_type):
nptype = comms._ibis_to_numpy[ibis_type]
mask = np.zeros(N, dtype=np.uint8)
values = rand_int_span(nptype, N)
col = _to_masked(values, mask, ibis_type)
result = col.to_numpy_for_pandas()
assert result.dtype == nptype
_check_masked_correct(col, result, nptype, lambda x: False)
def _check_masked_correct(col, result, dtype, is_na_f):
mask = col.mask()
data = col.data_bytes().view(dtype)
for i, v in enumerate(result):
if mask[i]:
assert is_na_f(v)
else:
# For comparisons outside representable integer range, this may
# yield incorrect results
assert v == data[i]
class TestTableRoundTrip(unittest.TestCase):
"""
Test things not captured by datatype-specific tests
"""
def test_table_metadata(self):
# Check values from preamble
pass
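# A hedged, illustrative sketch (not exercised by the test suite): the comments
# in TestImpalaMaskedFormat note that integer data containing nulls reaches
# pandas as float64 with NaN standing in for NULL.  Plain pandas shows the same
# convention, independent of the comms extension.
def _pandas_null_upcast_sketch():
    import pandas as pd
    s = pd.Series([1, 2, None])
    assert s.dtype == np.float64
    assert np.isnan(s.iloc[2])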
|
apache-2.0
|
rohanp/scikit-learn
|
examples/neural_networks/plot_mlp_alpha.py
|
17
|
4088
|
"""
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for the regularization term, also known as the penalty
term, that combats overfitting by constraining the size of the weights.
Increasing alpha may fix high variance (a sign of overfitting) by encouraging
smaller weights, resulting in a decision boundary plot with less curvature.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
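# A hedged, illustrative add-on (not part of the original example); it reuses
# only names, classifiers and the last X_train/y_train defined above and is
# never called, so the figure above is unchanged.
def _weight_norm_sketch():
    # The docstring says a larger alpha constrains the size of the weights.
    # Refitting the classifiers on the last preprocessed dataset and printing
    # the L2 norm of their first-layer weights (coefs_[0]) is one way to see
    # that trend.
    for name, clf in zip(names, classifiers):
        clf.fit(X_train, y_train)
        print("%s: ||W1|| = %.3f" % (name, np.linalg.norm(clf.coefs_[0])))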
|
bsd-3-clause
|
fabioticconi/scikit-learn
|
sklearn/preprocessing/tests/test_label.py
|
34
|
18227
|
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
|
bsd-3-clause
|
camallen/aggregation
|
experimental/penguins/clusterAnalysis/check.py
|
2
|
2981
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from divisiveDBSCAN import DivisiveDBSCAN
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
print base_directory
client = pymongo.MongoClient()
db = client['penguin_2014-10-12']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
steps = [5,10,15,20]
penguins_at = {k:[] for k in steps}
alreadyThere = False
subject_index = 0
import cPickle as pickle
#to_sample = pickle.load(open(base_directory+"/Databases/sample.pickle","rb"))
import random
#for subject in collection2.find({"classification_count": 20}):
alreadyThere = True
user_markings = [] #{k:[] for k in steps}
user_ips = [] #{k:[] for k in steps}
zooniverse_id = "APZ0000nw3"
user_index = 0
for classification in collection.find({"subjects" : {"$elemMatch": {"zooniverse_id":zooniverse_id}}}):
user_index += 1
if user_index == 21:
break
per_user = []
ip = classification["user_ip"]
try:
markings_list = classification["annotations"][1]["value"]
if isinstance(markings_list,dict):
for marking in markings_list.values():
if marking["value"] in ["adult","chick"]:
x,y = (float(marking["x"]),float(marking["y"]))
user_markings.append((x,y))
user_ips.append(ip)
except (KeyError, ValueError):
#classification["annotations"]
user_index += -1
user_identified_penguins = DivisiveDBSCAN(3).fit(user_markings,user_ips)#,base_directory + "/Databases/penguins/images/"+object_id+".JPG")
#penguins_at[s].append(len(user_identified_penguins))
#print str(s) + " - " + str(len(user_identified_penguins))
X,Y = zip(*user_identified_penguins)
subject = collection2.find_one({"zooniverse_id": zooniverse_id})
url = subject["location"]["standard"]
fName = url.split("/")[-1]
print "http://demo.zooniverse.org/penguins/subjects/standard/"+fName
if not(os.path.isfile(base_directory + "/Databases/penguins/images/"+fName)):
#urllib.urlretrieve ("http://demo.zooniverse.org/penguins/subjects/standard/"+fName, "/home/greg/Databases/penguins/images/"+fName)
urllib.urlretrieve ("http://www.penguinwatch.org/subjects/standard/"+fName, base_directory+"/Databases/penguins/images/"+fName)
print "/home/greg/Databases/penguins/images/"+fName
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+fName)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
plt.plot(X,Y,'.')
plt.show()
|
apache-2.0
|
wasit7/book_pae
|
pae/final_code/src/create_dfmore20.py
|
1
|
3454
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 23:15:03 2016
@author: Methinee
"""
import pandas as pd
import numpy as np
import pickle
import os
import matplotlib
matplotlib.style.use('ggplot')
from collections import defaultdict
##////////////////Create dfmore20 without merging(schoolGpa,province)/////////////
#df_file = pd.read_excel('../src/transform.xlsx')
#headers=list(df_file.columns.values)
#
##replace grade with integer and noplus in each grade
##{'':0, 'U#':1, 'U':1, 'S#':2, 'S':2, 'W':3, 'F':4, 'D':5, 'D+':5, 'C':6, 'C+':6, 'B':7, 'B+':7, 'A':8}
#df_file = df_file.fillna(0)
#df_file = df_file.replace(['A', 'B+', 'B', 'C+', 'C' , 'D+' , 'D' , 'F' , 'W' , 'S' , 'S#' , 'U' , 'U#'],
# [8, 7, 7, 6 , 6, 5, 5, 4, 3, 2, 2, 1, 1])
#
##Select subject that have student enroll >=20
#count_courseId = df_file["3COURSEID"].value_counts()
#more20 = count_courseId[count_courseId[:]>=20]
#less20 = count_courseId[count_courseId[:]<20]
#df_more20 = df_file[df_file["3COURSEID"].isin(more20.index)]
#df_more20.to_csv('../data'+'/df_more20.csv') #create new dataframe (>=20 enrollment)
#df_less20 = df_file[~df_file["3COURSEID"].isin(more20.index)]
#df_less20.to_csv('../data'+'/df_more20.csv') #create new dataframe (>=20 enrollment)
#
##Create new file csv, all subject(>=20) and random order
#for m in more20.index:
# dfx=df_more20[df_more20["3COURSEID"].isin([m])]
# dfx=dfx.iloc[np.random.permutation(len(dfx))]
# dfx.to_csv('../data/df_sub_more20'+"/df_%s.csv"%m)
#more20.plot(kind='bar')
#////////////////Create dfmore20 with merging(schoolGpa,province)/////////////
#df_file = pd.read_csv('../data'+'/df_dropSub_less20_dropNaResult.csv',delimiter=",", skip_blank_lines = True,
# error_bad_lines=False)
df_file = pd.read_excel('../data/transform_merge.xlsx')
headers=list(df_file.columns.values)
#replace grade with integer and noplus in each grade
#{'':0, 'U#':1, 'U':1, 'S#':2, 'S':2, 'W':3, 'F':4, 'D':5, 'D+':5, 'C':6, 'C+':6, 'B':7, 'B+':7, 'A':8}
df_file = df_file.fillna(0)
df_file = df_file.replace(['A', 'B+', 'B', 'C+', 'C' , 'D+' , 'D' , 'F' , 'W' , 'S' , 'S#' , 'U' , 'U#'],
[8, 7, 7, 6 , 6, 5, 5, 4, 3, 2, 2, 1, 1])
#Select subject that have student enroll >=20
count_courseId = df_file["3COURSEID"].value_counts()
more20 = count_courseId[count_courseId[:]>=20]
less20 = count_courseId[count_courseId[:]<20]
df_more20 = df_file[df_file["3COURSEID"].isin(more20.index)]
df_more20.to_csv('../data'+'/df_more20.csv') #create new dataframe (>=20 enrollment)
df_less20 = df_file[~df_file["3COURSEID"].isin(more20.index)]
df_less20.to_csv('../data'+'/df_less20.csv') #create new dataframe (<20 enrollment)
#Create a new csv file for each subject (>=20 enrollments), with rows in random order
for m in more20.index:
dfx=df_more20[df_more20["3COURSEID"].isin([m])]
dfx=dfx.iloc[np.random.permutation(len(dfx))]
dfx.to_csv('../data/df_sub_more20_merge'+"/df_%s.csv"%m)
more20.plot(kind='bar')
#Create new DataFrame (drop the columns of subjects with fewer than 20 enrollments)
subjects = []
countSub = 0
for sub in df_less20['3COURSEID']:
if sub not in subjects:
subjects.append(sub)
countSub = countSub+1
for drop in subjects:
df_file = df_file.drop([drop],axis=1)
df_file = df_file[df_file["3COURSEID"].isin(more20.index)]
#df_file.to_csv('../data'+'/df_dropSub_less20.csv') #create new dataframe (>=20 enrollment))
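# A hedged, illustrative sketch (defined but never called): the >=20 filter
# above keeps only courses whose enrollment count reaches the threshold.  The
# same value_counts/isin idiom on a toy frame with hypothetical course ids:
def _enrollment_filter_sketch():
    toy = pd.DataFrame({'3COURSEID': ['CS101'] * 25 + ['CS999'] * 3})
    counts = toy['3COURSEID'].value_counts()
    keep = counts[counts >= 20]
    return toy[toy['3COURSEID'].isin(keep.index)]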
|
mit
|
wanggang3333/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
gbrammer/pygrism
|
background.py
|
2
|
24723
|
import os
import pyfits
import numpy as np
import glob
import shutil
import time
import matplotlib.pyplot as plt
# testing by Britt
USE_PLOT_GUI=False
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.ticker as mticker
import threedhst
import threedhst.eazyPy as eazy
import threedhst.catIO as catIO
import unicorn
HAS_PHOTOMETRY = True
PHOTOMETRY_ID = None
BAD_SPECTRUM = False
SPC_FILENAME = None
SPC = None
IREF = os.getenv('iref')
try:
flat_f140 = pyfits.open(IREF+'/uc721143i_pfl.fits')
flat_g141 = pyfits.open(IREF+'/u4m1335mi_pfl.fits')
flat = flat_g141[1].data[5:1019,5:1019] / flat_f140[1].data[5:1019, 5:1019]
flat[flat <= 0] = 5
flat[flat > 5] = 5
except:
print '\nthreedhst.grism_sky: Flat-field files (uc721143i_pfl.fits) not found in IREF: %s\n' %(IREF)
flat = np.ones((1014,1014))
xprofile = None
yprofile = None
try:
not_blobs = (pyfits.open(unicorn.GRISM_HOME+'COSMOS/RAW/ibhm29wnq_flt.fits.gz')[3].data & (4+32+16+512)) == 0
except:
not_blobs = np.ones((1014,1014))
def profile_msk(flt='ibhm46ioq_msk.fits', biweight=False, extension=0):
"""
Get a cut across the columns of a FLT image, optionally masking objects and DQ
pixels.
If `biweight`, then the output is the biweight mean of each column.
Otherwise, it's just the mean.
"""
import threedhst.grism_sky as bg
im = pyfits.open(flt)
mask = (im[extension].data == 0) | (~not_blobs)
shp = im[extension].data.shape
xpix = np.arange(shp[0])
N = np.ones(shp)
im[extension].data[mask] = 0
N[mask] = 0
ypix = np.sum(im[extension].data, axis=0) / np.sum(N, axis=0)
if biweight:
for i in range(shp[0]):
column = im[extension].data[:,i]
ypix[i] = threedhst.utils.biweight(column[column != 0], mean=True)
#
bg.xprofile, bg.yprofile = xpix, ypix
return xpix, ypix #, ylo, yhi
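# A hedged, illustrative sketch (defined but never called): profile_msk above
# computes, for each image column, the mean over unmasked pixels by zeroing
# masked pixels and dividing the column sums by the per-column count of good
# pixels.  The same idea on a tiny toy array with a hypothetical bad-pixel mask:
def _masked_column_mean_sketch():
    toy = np.arange(12, dtype=float).reshape(3, 4)
    mask = toy % 5 == 0
    good = np.where(mask, 0.0, 1.0)
    toy_masked = np.where(mask, 0.0, toy)
    return toy_masked.sum(axis=0) / good.sum(axis=0)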
def show_profile():
    """
    Look at images collapsed along columns to separate into groups with
    similar patterns.
    """
    import unicorn.background as bg
#### All fields
# flt_files = []
# for field in ['COSMOS','AEGIS','GOODS-N','GOODS-S','UDS']:
# fp = open('%s.G141.list' %(field))
# flt_files.extend(fp.readlines())
# fp.close()
#
bg_flt, bg_field, bg_val = np.loadtxt('background.%s.dat' %(filter), dtype=np.str, unpack=True)
flt_files = bg_flt
N = len(flt_files)
profiles = np.zeros((N, 1014))
for i,flt in enumerate(flt_files):
print flt, i
xi, yi = bg.profile_msk(flt=flt)
profiles[i,:] = yi
#
norm = np.ones(N)
test = norm > 0
for i in range(N):
yi = profiles[i,:]
test[i] = True
norm[i] = np.mean(yi[np.abs(xi-507) < 50])
if test[i]:
p = plt.plot(xi,yi/norm[i],color='red', alpha=0.05)
else:
norm[i] = 0
#### Scale msk images
for msk, ni in zip(bg_flt, norm):
im = pyfits.open(msk)
im[0].data *= 1./ni
im.writeto(msk.replace('msk','msk.s'), clobber=True)
print msk
#### Get some normalization points to sort into types
xint = np.array([50,170, 880, 940])
yint = np.ones((N,len(xint)))
dx = 5
for i in range(N):
yi = profiles[i,:]
for j in range(len(xint)):
yint[i,j] = np.median(yi[xint[j]-dx:xint[j]+dx])/norm[i]
r1 = yint[:,1]/yint[:,0]
r2 = yint[:,2]/yint[:,3]
plt.hist(r1, range=(1, 1.4), bins=40, alpha=0.7)
plt.hist(r2, range=(0.8, 1.4), bins=60, alpha=0.7)
#### Loop through to make subset images
so = np.argsort(r1)
NSET = 5
#### Get extremes
so = np.argsort(yint[:,1]/norm[i])
NSET, i = 25, 24
# i = 23,24
so = np.argsort(yint[:,2]/norm[i])
NSET, i = 120, 119
os.system('rm *.set*.*')
for i in range(NSET):
NI = len(so)/NSET
if i == (NSET-1):
max = len(so)
else:
max = (i+1)*NI
#
idx = so[i*NI:max]
root = 'set%03d' %(i+1)
unicorn.background.combine_subset(filter='G141', idx=idx, root=root)
#
for j in idx:
p = plt.plot(xi, profiles[j,:]/norm[j], color='red', alpha=0.05)
#
xi, yi = bg.profile_msk(flt='combine.%s.%s.fits' %(filter, root))
p = plt.plot(xi, yi, color=(0,0,(i+1.)/NSET))
##### Show earlier sky files
sky_files = glob.glob('../CONF/sky*.fits')
for sky_file in sky_files:
xi, yi = bg.profile_msk(flt=sky_file)
p = plt.plot(xi, yi, color='green')
plt.xlim(-10,1024)
plt.ylim(0.8,1.1)
#### Use these in "make_imaging_flat" below
root = 'set1'
idx = so[1*NI:2*NI]
root = 'set2'
idx = so[2*NI:3*NI]
root = 'set3'
idx = so[3*NI:]
root = 'set4'
#for i in range(len(flt_files)):
# flt_files[i] = flt_files[i][:-1].replace('msk','flt')
N = len(flt_files)
profiles = np.zeros((N, 1014))
for i,flt in enumerate(flt_files):
print flt
xi, yi = bg.profile_msk(flt=flt[:-1])
profiles[i,:] = yi
#### COSMOS
fp = open('COSMOS.G141.list')
flt_files = fp.readlines()
fp.close()
#for i in range(len(flt_files)):
# flt_files[i] = flt_files[i][:-1].replace('msk','flt')
N = len(flt_files)
profiles = np.zeros((N, 1014))
for i,flt in enumerate(flt_files):
print flt
xi, yi = bg.profile_msk(flt=flt[:-1])
profiles[i,:] = yi
norm = np.zeros(N)
test = norm > 0
for i in range(N):
yi = profiles[i,:]
norm[i] = np.mean(yi[np.abs(xi-507) < 50])
test[i] = np.median(yi[np.abs(xi-40) < 10]/norm[i]) < 1.95
if test[i]:
p = plt.plot(xi,yi/norm[i],color=(norm[i]/3.3,0,0), alpha=0.1)
else:
norm[i] = 0
profiles_norm = profiles / np.dot(norm.reshape(N,1), np.ones((1,1014)))
avg = np.mean(profiles_norm[norm != 0, :], axis=0)
plt.plot(xi, avg, color='blue', alpha=0.5)
# for i in range(N):
# yi = profiles[i,:]*1.
# if yi.sum() == 0:
# continue
# #
# yi-=0.
# nor = np.mean(yi[np.abs(xi-307) < 50])
# p = plt.plot(xi,yi/nor,color=(norm[i]/6,0,0), alpha=0.1)
plt.ylim(0.92,1.04)
plt.xlim(-10,1024)
plt.savefig('COSMOS_profile.png')
#### GOODS-N
fp = open('GOODS-N.G141.list')
flt_files = fp.readlines()
fp.close()
Ng = len(flt_files)
profiles_g = np.zeros((Ng, 1014))
for i,flt in enumerate(flt_files):
print flt
xi, yi = bg.profile_msk(flt=flt)
profiles_g[i,:] = yi
xi = np.arange(1014)
norm_g = np.zeros(Ng)
test = norm_g > 0
for i in range(Ng):
yi = profiles_g[i,:]
norm_g[i] = np.mean(yi[np.abs(xi-507) < 50])
# very hi
test[i] = np.median(yi[np.abs(xi-200) < 20]/norm_g[i]) > 1.02
# lo
#test[i] = np.median(yi[np.abs(xi-200) < 20]/norm_g[i]) < 1.01
#test[i] = test[i] & (np.median(yi[np.abs(xi-40) < 10]/norm_g[i]) > 0.96)
# hi
#test[i] = test[i] & (np.median(yi[np.abs(xi-40) < 10]/norm_g[i]) < 0.96)
#
test[i] = True
if test[i]:
p = plt.plot(xi,yi/norm_g[i],color=(0,0,norm_g[i]/1.8), alpha=0.1)
else:
norm_g[i]*=0
#
plt.ylim(0.92,1.04)
plt.xlim(-10,1024)
profiles_norm_g = profiles_g / np.dot(norm_g.reshape(Ng,1), np.ones((1,1014)))
avg_g = np.mean(profiles_norm_g[norm_g != 0, :], axis=0)
plt.plot(xi, avg_g, color='green', alpha=0.5)
plt.ylim(0.92,1.04)
plt.xlim(-10,1024)
plt.savefig('GOODS-N_profile.png')
#### AEGIS
fp = open('AEGIS.G141.list')
flt_files = fp.readlines()
fp.close()
Na = len(flt_files)
profiles_a = np.zeros((Na, 1014))
for i,flt in enumerate(flt_files):
print flt
xi, yi = bg.profile_msk(flt=flt)
profiles_a[i,:] = yi
xi = np.arange(1014)
norm_a = np.zeros(Na)
test = norm_a > 0
for i in range(Na):
yi = profiles_a[i,:]
norm_a[i] = np.mean(yi[np.abs(xi-507) < 50])
test[i] = True
# very hi
# test[i] = np.median(yi[np.abs(xi-200) < 20]/norm_a[i]) < 1.52
# lo
#test[i] = test[i] & (np.median(yi[np.abs(xi-40) < 10]/norm_a[i]) > 0.96)
# hi
#test[i] = test[i] & (np.median(yi[np.abs(xi-40) < 10]/norm_a[i]) < 0.96)
#
if test[i]:
p = plt.plot(xi,yi/norm_a[i],color=(0,0,norm_a[i]/1.8), alpha=0.1)
else:
norm_a[i]*=0
#
plt.ylim(0.92,1.04)
plt.xlim(-10,1024)
plt.savefig('AEGIS_profile.png')
#### GOODS-S
fp = open('GOODS-S.G141.list')
flt_files = fp.readlines()
fp.close()
Ngs = len(flt_files)
profiles_gs = np.zeros((Ngs, 1014))
for i,flt in enumerate(flt_files):
print flt
xi, yi = bg.profile_msk(flt=flt)
profiles_gs[i,:] = yi
xi = np.arange(1014)
norm_gs = np.zeros(Ngs)
test = norm_gs > 0
for i in range(Ngs):
yi = profiles_gs[i,:]
norm_gs[i] = np.mean(yi[np.abs(xi-507) < 50])
test[i] = True
# very hi
# test[i] = np.median(yi[np.abs(xi-200) < 20]/norm_a[i]) < 1.52
# lo
#test[i] = test[i] & (np.median(yi[np.abs(xi-40) < 10]/norm_a[i]) > 0.96)
# hi
#test[i] = test[i] & (np.median(yi[np.abs(xi-40) < 10]/norm_a[i]) < 0.96)
#
if test[i]:
p = plt.plot(xi,yi/norm_gs[i],color=(0,0,norm_gs[i]/1.8), alpha=0.1)
else:
norm_gs[i]*=0
#
plt.ylim(0.92,1.04)
plt.xlim(-10,1024)
plt.savefig('GOODS-S_profile.png')
def make_g141_bg():
"""
Make average background images with object masks
"""
from pyraf import iraf
os.chdir("/3DHST/Spectra/Work/Background")
field = 'COSMOS'
PATHS = []
files = []
for field in ['COSMOS','GOODS-N','GOODS-S','AEGIS','UDS']:
info = catIO.Readfile('/3DHST/Spectra/Work/%s/PREP_FLT/files.info' %(field))
field_files = info.file[info.filter == 'G141']
files.extend(field_files)
PATHS.extend(['/3DHST/Spectra/Work/%s/RAW/' %(field)] * len(info.file[info.filter == 'G141']))
field = 'ALL'
#files = glob.glob('ibhm*flt.seg.fits')
#PATH = ('/3DHST/Spectra/Work/%s/RAW/' %(field))*len(files)
# #### Direct flat-field
flat = flat_g141[1].data[5:1019,5:1019] / pyfits.open('COSMOS_f140w_flat.fits')[1].data[5:-5,5:-5]
flat[flat <= 0] = 5
flat[flat > 5] = 5
NF = len(files)
idx = np.arange(NF)
nxpix, nypix = 1014, 1014
#nxpix, nypix = 507, 507
X = np.zeros((NF, nxpix*nypix))
## Otherwise get it from "show_profile" above
test = idx > -10
for j,i in enumerate(idx):
if ~test[i]:
continue
#
fi = files[i]
if not os.path.exists(fi.replace('flt','flt.seg')):
continue
#
if os.path.exists(fi.replace('.gz','')+'.mask.reg'):
continue
#
flt = pyfits.open(PATHS[i]+files[i])
flt[1].data *= flat
print unicorn.noNewLine+'%d %s %s' %(i, files[i], flt[0].header['PFLTFILE'])
#
### Segmentation mask
masked = pyfits.open(fi.replace('flt','flt.seg'))[0].data == 0
### DQ mask, hot pixels and the "death star"
dq_ok = (flt[3].data & (4+32+16)) == 0
#
ok = masked & np.isfinite(flt[1].data) & (dq_ok)
#flt[1].data /= np.median(flt[1].data[ok])
flt[1].data /= threedhst.utils.biweight(flt[1].data[ok], mean=True)
flt[1].data[(ok == False)] = 0
X[j,:] = flt[1].data[0:nypix, 0:nxpix].flatten()
#
#pyfits.writeto(files[i].replace('flt','msk').replace('.gz',''), flt[1].data, clobber=True, header=flt[1].header)
#### Average
#nsum = np.sum(X != 0, axis=0).reshape(1014,1014)
#avg = np.sum(X, axis=0).reshape(1014,1014)/nsum
for field in ['COSMOS','GOODS-N','GOODS-S','AEGIS','UDS']:
info = catIO.Readfile('/3DHST/Spectra/Work/%s/PREP_FLT/files.info' %(field))
field_files = info.file[info.filter == 'G141']
fp = open(field+'.g141.list','w')
for ff in field_files:
msk = ff.replace('flt.fits.gz','msk.fits')
if os.path.exists(msk):
fp.write('%s\n' %(msk))
fp.close()
#
iraf.imcombine ( input = '@%s.g141.list' %(field), output = 'combined_g141_%s' %(field),
headers = '', bpmasks = '', rejmasks = '', nrejmasks = '',
expmasks = '', sigmas = '', logfile = 'STDOUT', combine = 'average',
reject = 'minmax', project = iraf.no, outtype = 'real',
outlimits = '', offsets = 'none', masktype = 'none',
maskvalue = '0', blank = 0.0, scale = 'none', zero = 'none',
weight = 'none', statsec = '', expname = '', lthreshold = 0.02,
hthreshold = 20.0, nlow = 3, nhigh = 3, nkeep = 1,
mclip = iraf.yes, lsigma = 3.0, hsigma = 3.0, rdnoise = '0.',
gain = '1.', snoise = '0.', sigscale = 0.1, pclip = -0.5)
fp = open('msk_list','w')
for file in files:
fp.write(file+'\n')
fp.close()
iraf.imcombine ( input = '@msk_list', output = 'combine_masked',
headers = '', bpmasks = '', rejmasks = '', nrejmasks = '',
expmasks = '', sigmas = '', logfile = 'STDOUT', combine = 'average',
reject = 'minmax', project = iraf.no, outtype = 'real',
outlimits = '', offsets = 'none', masktype = 'none',
maskvalue = '0', blank = 0.0, scale = 'none', zero = 'none',
weight = 'none', statsec = '', expname = '', lthreshold = 1e-06,
hthreshold = 100.0, nlow = 5, nhigh = 5, nkeep = 1,
mclip = iraf.yes, lsigma = 3.0, hsigma = 3.0, rdnoise = '0.',
gain = '1.', snoise = '0.', sigscale = 0.1, pclip = -0.5)
sky = pyfits.open('combine_COSMOS.fits')[0].data
# #### Average
# nsum = np.sum(X != 0, axis=0).reshape(nypix,nxpix)
# avg = np.sum(X, axis=0).reshape(nypix,nxpix)/nsum
#
# ### Fill empty pixels with no input images
# sky = avg
x,y = np.where((np.isfinite(sky) == False) | (sky == 0))
NX = len(x)
pad = 1
for i in range(NX):
xi = x[i]
yi = y[i]
sub = sky[xi-pad:xi+pad+2,yi-pad:yi+pad+2]
if (np.sum(sub) != 0.0):
sky[xi,yi] = np.median(sub[np.isfinite(sub)])
still_bad = (np.isfinite(sky) == False) | (sky <= 0.01)
sky[still_bad] = flat[0:nypix, 0:nxpix][still_bad]
# bad_flat = (flat < 0.5)
# sky[bad_flat] = flat[bad_flat]
    im_sky = pyfits.PrimaryHDU(data=sky)
    #### Number of input exposures contributing to each pixel
    nsum = np.sum(X != 0, axis=0).reshape(nypix, nxpix)
    im_n = pyfits.ImageHDU(data=nsum)
    im = pyfits.HDUList([im_sky, im_n])
im.writeto('sky.fits', clobber=True)
#### for DIRECT flat
flatim = pyfits.open('/3DHST/Spectra/Work/CONF/sky_cosmos.fits')
flatim[0].data = sky
flatim[1].data = sky
#flatim[3].data[5:-5,5:-5] = nsum
flatim.writeto('%s_g141_flat.fits' %(field), clobber=True)
def make_imaging_flat():
"""
Make average background images with object masks
"""
from pyraf import iraf
#files = glob.glob('ibhm*flt.seg.fits')
#PATH = ('/3DHST/Spectra/Work/%s/RAW/' %(field))*len(files)
###################### Grism sky backgrounds
filter, flat_file = 'G141', 'u4m1335mi_pfl.fits'
flat = pyfits.open(IREF+'/'+flat_file)[1].data[5:-5,5:-5] / pyfits.open(IREF+'/flat.IR_avg.fits')[1].data[5:-5,5:-5]
flat[flat <= 0] = 5
flat[flat > 5] = 5
##################### Direct flat-field
filter, flat_file = 'F140W', 'uc721143i_pfl.fits'
filter, flat_file = 'F125W', 'uc72113qi_pfl.fits'
filter, flat_file = 'F160W', 'uc721145i_pfl.fits'
filter, flat_file = 'F105W', 'uc72113oi_pfl.fits'
flat = pyfits.open(IREF+'/'+flat_file)[1].data[5:-5,5:-5]
flat[flat <= 0] = 5
flat[flat > 5] = 5
############### 3D-HST
os.chdir("/3DHST/Spectra/Work/Background")
fields = ['COSMOS','GOODS-N','GOODS-S','AEGIS','UDS']
PREP_FLT = '/3DHST/Spectra/Work/xxx/PREP_FLT/'
RAW = '/3DHST/Spectra/Work/xxx/RAW/'
############### CANDELS
os.chdir('/Users/gbrammer/CANDELS/Flats/')
fields = ['GOODS-S','EGS','UDS']
PREP_FLT = '/Users/gbrammer/CANDELS/xxx/PREP_FLT/'
RAW = '/Users/gbrammer/CANDELS/xxx/RAW/'
PATHS = []
files = []
file_field = []
for field in fields:
info = catIO.Readfile(PREP_FLT.replace('xxx',field)+'files.info')
field_files = info.file[info.filter == filter]
files.extend(field_files)
PATHS.extend([RAW.replace('xxx',field)] * len(field_files))
file_field.extend([field]*len(field_files))
##################
NF = len(files)
idx = np.arange(NF)
## Otherwise get it from "show_profile" above
test = idx > -10
fp = open('background.%s.dat' %(filter),'w')
for j,i in enumerate(idx):
if ~test[i]:
continue
#
fi = files[i]
if not os.path.exists(fi.replace('flt','flt.seg')):
continue
#
if os.path.exists(fi.replace('.gz','')+'.mask.reg'):
continue
#
flt = pyfits.open(PATHS[i]+files[i])
flt[1].data *= flat
print unicorn.noNewLine+'%d %s %s' %(i, files[i], flt[0].header['PFLTFILE'])
#
### Segmentation mask
masked = pyfits.open(fi.replace('flt','flt.seg'))[0].data == 0
### DQ mask, hot pixels and the "death star"
dq_ok = (flt[3].data & (4+32+16)) == 0
#
ok = masked & np.isfinite(flt[1].data) & (dq_ok)
#flt[1].data /= np.median(flt[1].data[ok])
level = threedhst.utils.biweight(flt[1].data[ok], mean=True)
fp.write('%s %s %.3f\n' %(files[i].replace('flt','msk').replace('.gz',''), file_field[i], level))
#
#flt[1].data /= level
#flt[1].data[(ok == False)] = 0
#pyfits.writeto(files[i].replace('flt','msk').replace('.gz',''), flt[1].data, clobber=True, header=flt[1].header)
fp.close() ## background.dat
#
# nsum = np.sum(X != 0, axis=0).reshape(1014,1014)
# avg = np.sum(X, axis=0).reshape(1014,1014)/nsum
# sky = avg
#### Use iraf.imcombine
for field in fields:
info = catIO.Readfile(PREP_FLT.replace('xxx',field)+'files.info')
field_files = info.file[info.filter == filter]
if len(field_files) < 10:
continue
#
fp = open('%s.%s.list' %(field, filter),'w')
for ff in field_files:
msk = ff.replace('flt.fits.gz','msk.fits')
if os.path.exists(msk):
fp.write('%s\n' %(msk))
fp.close()
#
iraf.imcombine ( input = '@%s.%s.list' %(field, filter), output = 'combine.%s.%s' %(field, filter),
headers = '', bpmasks = '', rejmasks = '', nrejmasks = '',
expmasks = '', sigmas = '', logfile = 'STDOUT', combine = 'average',
reject = 'minmax', project = iraf.no, outtype = 'real',
outlimits = '', offsets = 'none', masktype = 'none',
maskvalue = '0', blank = 0.0, scale = 'none', zero = 'none',
weight = 'none', statsec = '', expname = '', lthreshold = 1e-06,
hthreshold = 100.0, nlow = 5, nhigh = 5, nkeep = 1,
mclip = iraf.yes, lsigma = 3.0, hsigma = 3.0, rdnoise = '0.',
gain = '1.', snoise = '0.', sigscale = 0.1, pclip = -0.5)
##### Weight by the square of the background level (more flat signal for higher bg!)
bg_flt, bg_field, bg = np.loadtxt('background.%s.dat' %(filter), dtype=np.str, unpack=True)
weights = np.cast[float](bg)**2
fp = open('%s.list' %(filter),'w')
fpw = open('%s.weight' %(filter),'w')
for msk, wht in zip(bg_flt, weights):
if os.path.exists(msk):
fp.write('%s\n' %(msk))
fpw.write('%.2f\n' %(wht))
fp.close()
fpw.close()
iraf.imcombine ( input = '@%s.list' %(filter), output = 'combine.%s' %(filter),
headers = '', bpmasks = '', rejmasks = '', nrejmasks = '',
expmasks = '', sigmas = '', logfile = 'STDOUT', combine = 'average',
reject = 'minmax', project = iraf.no, outtype = 'real',
outlimits = '', offsets = 'none', masktype = 'none',
maskvalue = '0', blank = 0.0, scale = 'none', zero = 'none',
weight = '@%s.weight' %(filter), statsec = '', expname = '', lthreshold = 1e-06,
hthreshold = 100.0, nlow = 5, nhigh = 5, nkeep = 1,
mclip = iraf.yes, lsigma = 3.0, hsigma = 3.0, rdnoise = '0.',
gain = '1.', snoise = '0.', sigscale = 0.1, pclip = -0.5)
##### Final processing
combined_files = glob.glob('combine*%s*fits' %(filter))
for file in combined_files:
sky = pyfits.open(file)[0].data
#
##### Fix bad pixels
if filter != 'G141':
ratio = sky/flat
stats = threedhst.utils.biweight(ratio[np.isfinite(ratio)], both=True)
sky = sky/stats[0]
max = stats[1]*5
else:
max = 10
#
x,y = np.where((np.isfinite(sky) == False) | (sky/flat > (1+max)) | (sky == 0))
NX = len(x)
print '%s: N_fix = %d' %(file, NX)
pad = 1
for i in range(NX):
xi = x[i]
yi = y[i]
sub = sky[xi-pad:xi+pad+2,yi-pad:yi+pad+2]
if (np.sum(sub) != 0.0):
sky[xi,yi] = np.median(sub[np.isfinite(sub)])
#
still_bad = (np.isfinite(sky) == False) | (sky <= 0.01)
sky[still_bad] = flat[still_bad]
#
#### for DIRECT flat
if filter == 'G141':
flatim = pyfits.open(unicorn.GRISM_HOME + 'CONF/sky_cosmos.fits')
flatim[0].data = sky
#flatim[3].data[5:-5,5:-5] = nsum
flatim.writeto(file.replace('combine','sky'), clobber=True)
else:
flatim = pyfits.open(IREF+'/'+flat_file)
flatim[1].data[5:-5,5:-5] = sky
#flatim[3].data[5:-5,5:-5] = nsum
flatim.writeto(file.replace('combine','flat'), clobber=True)
def combine_subset(filter='G141', idx=np.array([0]), root='set1', use_scaled=True):
"""
Subset, get index array of objects to use from the "show_profile" function above
"""
from pyraf import iraf
bg_flt, bg_field, bg_val = np.loadtxt('background.%s.dat' %(filter), dtype=np.str, unpack=True)
weights = np.cast[float](bg_val)**2
fp = open('%s.%s.list' %(filter, root),'w')
fpw = open('%s.%s.weight' %(filter, root),'w')
for msk, wht in zip(bg_flt[idx], weights[idx]):
if os.path.exists(msk):
if use_scaled:
img = msk.replace('msk','msk.s')
else:
img = msk
fp.write('%s\n' %(img))
fpw.write('%.4f\n' %(wht))
#
fp.close()
fpw.close()
iraf.imcombine ( input = '@%s.%s.list' %(filter, root), output = 'combine.%s.%s' %(filter, root),
headers = '', bpmasks = '', rejmasks = '', nrejmasks = '',
expmasks = '', sigmas = '', logfile = 'STDOUT', combine = 'average',
reject = 'minmax', project = iraf.no, outtype = 'real',
outlimits = '', offsets = 'none', masktype = 'none',
maskvalue = '0', blank = 0.0, scale = 'none', zero = 'none',
weight = '@%s.%s.weight' %(filter, root), statsec = '', expname = '', lthreshold = 1e-04,
hthreshold = 100.0, nlow = 2, nhigh = 2, nkeep = 1,
mclip = iraf.yes, lsigma = 3.0, hsigma = 3.0, rdnoise = '0.',
gain = '1.', snoise = '0.', sigscale = 0.1, pclip = -0.5)
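# Typical call (sketch): the index array comes from the sorting done in
# show_profile(), e.g.
#
#     so = np.argsort(r1)
#     combine_subset(filter='G141', idx=so[:50], root='set001')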
def make_average_flat_for_grism():
"""
Take the average of the master flats made from the CANDELS F125W and F160W images
"""
os.chdir('/Users/gbrammer/CANDELS/Flats/')
f125 = pyfits.open('flat.F125W.fits')
f160 = pyfits.open('flat.F160W.fits')
avg = f125[1].data*0.5+f160[1].data*0.5
f125[1].data = avg
f125.writeto('flat.IR_avg.fits', clobber=True)
|
mit
|
harshaneelhg/scikit-learn
|
sklearn/learning_curve.py
|
110
|
13467
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
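# Minimal usage sketch (illustrative only, not part of the original module).
# The estimator, dataset and train_sizes are arbitrary choices; the point is
# the shape of the returned arrays.
#
#     from sklearn.datasets import make_classification
#     from sklearn.naive_bayes import GaussianNB
#     from sklearn.learning_curve import learning_curve
#
#     X, y = make_classification(n_samples=200, n_features=10, random_state=0)
#     sizes, train_scores, test_scores = learning_curve(
#         GaussianNB(), X, y, train_sizes=[0.2, 0.5, 1.0], cv=5)
#     # sizes holds 3 absolute training-set sizes; train_scores and
#     # test_scores have shape (3, 5): one row per size, one column per fold.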
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
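    # e.g. with train_sizes = [2, 5, 8] this pairs each target size with only
    # the *new* samples to feed to partial_fit at that step:
    # (2, train[0:2]), (5, train[2:5]), (8, train[5:8])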
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
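# Minimal usage sketch (illustrative only, not part of the original module):
# sweep SVC's ``gamma`` over the iris data; estimator, parameter range and cv
# are arbitrary choices here.
#
#     import numpy as np
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     from sklearn.learning_curve import validation_curve
#
#     iris = load_iris()
#     train_scores, test_scores = validation_curve(
#         SVC(), iris.data, iris.target, param_name="gamma",
#         param_range=np.logspace(-6, -1, 5), cv=3)
#     # both arrays have shape (5, 3): one row per gamma value, one column
#     # per CV fold.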
|
bsd-3-clause
|
akloster/bokeh
|
bokeh/charts/builder/tests/test_step_builder.py
|
33
|
2495
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Step
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 3, 7, 5, 26]
xyvalues['pypy'] = [12, 33, 47, 15, 126]
xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [ 2., 2., 3., 3., 7., 7., 5., 5., 26.]
y_jython = [ 22., 22.,43., 43., 10., 10., 25., 25., 26.]
y_pypy = [ 12., 12., 33., 33., 47., 47., 15., 15., 126.]
x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
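        # The step builder duplicates the interior points: each y value is
        # held until the next x, which is why x and the y_* arrays above
        # interleave repeated entries.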
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
|
bsd-3-clause
|
sppalkia/weld
|
examples/python/grizzly/get_population_stats_simplified_grizzly.py
|
3
|
1266
|
#!/usr/bin/python
# The usual preamble
import numpy as np
import grizzly.numpy_weld as npw
import pandas as pd
import grizzly.grizzly as gr
import time
# Get data (US cities/states/counties dataset) and start cleanup
raw_data = pd.read_csv('data/us_cities_states_counties.csv', delimiter='|')
raw_data.dropna(inplace=True)
data = gr.DataFrameWeld(raw_data)
print "Done reading input file..."
start = time.time()
# Get all city information with total population greater than 500,000
data_big_cities = data[data["Total population"] > 500000]
# Compute "crime index" proportional to
# (Total population + 2*(Total adult population) - 2000*(Number of robberies)) / 100000
data_big_cities_stats = data_big_cities[
["Total population", "Total adult population", "Number of robberies"]].values
predictions = npw.dot(data_big_cities_stats, np.array(
[1, 2, -2000], dtype=np.int64)) / 100000.0
data_big_cities["Crime index"] = predictions
# Aggregate "crime index" scores by state
data_big_cities["Crime index"][data_big_cities["Crime index"] >= 0.02] = 0.032
data_big_cities["Crime index"][data_big_cities["Crime index"] < 0.01] = 0.005
print data_big_cities["Crime index"].sum().evaluate()
end = time.time()
print "Total end-to-end time: %.2f" % (end - start)
|
bsd-3-clause
|
emunsing/pyiso
|
pyiso/isone.py
|
1
|
11304
|
from pyiso.base import BaseClient
from pyiso import LOGGER
from os import environ
import pandas as pd
class ISONEClient(BaseClient):
NAME = 'ISONE'
base_url = 'https://webservices.iso-ne.com/api/v1.1'
TZ_NAME = 'America/New_York'
fuels = {
'Coal': 'coal',
'Hydro': 'hydro',
'Natural Gas': 'natgas',
'Nuclear': 'nuclear',
'Oil': 'oil',
'Other': 'other',
'Solar': 'solar',
'Wind': 'wind',
'Wood': 'biomass',
'Refuse': 'refuse',
'Landfill Gas': 'biogas',
}
locations = {
'INTERNALHUB': 4000,
'MAINE': 4001,
'NEWHAMPSHIRE': 4002,
'VERMONT': 4003,
'CONNECTICUT': 4004,
'RHODEISLAND': 4005,
'SEMASS': 4006,
'WCMASS': 4007,
'NEMASSBOST': 4008,
}
def __init__(self, *args, **kwargs):
super(ISONEClient, self).__init__(*args, **kwargs)
try:
self.auth = (environ['ISONE_USERNAME'], environ['ISONE_PASSWORD'])
except KeyError:
msg = 'Must define environment variables ISONE_USERNAME and ISONE_PASSWORD to use ISONE client.'
raise RuntimeError(msg)
def get_generation(self, latest=False, start_at=False, end_at=False, **kwargs):
# set args
self.handle_options(data='gen', latest=latest,
start_at=start_at, end_at=end_at, **kwargs)
# set up storage
raw_data = []
parsed_data = []
# collect raw data
for endpoint in self.request_endpoints():
# carry out request
data = self.fetch_data(endpoint, self.auth)
# pull out data
try:
raw_data += data['GenFuelMixes']['GenFuelMix']
except KeyError as e:
LOGGER.warn(e)
continue
# parse data
try:
df = self._parse_json(raw_data)
except ValueError:
return []
df = self.slice_times(df)
# return
return self.serialize_faster(df, drop_index=True)
def get_load(self, latest=False, start_at=False, end_at=False,
forecast=False, **kwargs):
# set args
self.handle_options(data='load', latest=latest, forecast=forecast,
start_at=start_at, end_at=end_at, **kwargs)
# set up storage
raw_data = []
# collect raw data
for endpoint in self.request_endpoints():
# carry out request
data = self.fetch_data(endpoint, self.auth)
# pull out data
try:
raw_data += self.parse_json_load_data(data)
except ValueError as e:
LOGGER.warn(e)
continue
# parse data
try:
df = self._parse_json(raw_data)
except ValueError:
return []
df = self.slice_times(df)
# return
return self.serialize_faster(df, drop_index=True)
def handle_options(self, **kwargs):
# default options
super(ISONEClient, self).handle_options(**kwargs)
# handle market
if not self.options.get('market'):
if self.options['data'] == 'gen':
# generation on n/a market
self.options['market'] = self.MARKET_CHOICES.na
else:
# load on real-time 5-min or hourly forecast
if self.options['forecast']:
self.options['market'] = self.MARKET_CHOICES.dam
else:
self.options['market'] = self.MARKET_CHOICES.fivemin
# handle frequency
if not self.options.get('frequency'):
if self.options['data'] == 'gen':
# generation on n/a frequency
self.options['frequency'] = self.FREQUENCY_CHOICES.na
else:
# load on real-time 5-min or hourly forecast
if self.options['market'] == self.MARKET_CHOICES.dam:
self.options['frequency'] = self.FREQUENCY_CHOICES.dam
else:
self.options['frequency'] = self.FREQUENCY_CHOICES.fivemin
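        # Net effect of the defaults above (sketch):
        #   data='gen'                  -> MARKET_CHOICES.na / FREQUENCY_CHOICES.na
        #   data='load', forecast=False -> MARKET_CHOICES.fivemin / FREQUENCY_CHOICES.fivemin
        #   data='load', forecast=True  -> MARKET_CHOICES.dam / FREQUENCY_CHOICES.dam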
# handle lmp
if self.options['data'] == 'lmp':
if self.options['market'] == self.MARKET_CHOICES.fivemin:
if self.options['forecast']:
raise ValueError('ISONE does not produce forecast five minute lmps')
def request_endpoints(self, location_id=None):
"""Returns a list of endpoints to query, based on handled options"""
# base endpoint
ext = ''
if self.options['data'] == 'gen':
base_endpoint = 'genfuelmix'
elif self.options['data'] == 'lmp' and location_id is not None:
ext = '/location/%s' % location_id
if self.options['market'] == self.MARKET_CHOICES.fivemin:
base_endpoint = 'fiveminutelmp'
elif self.options['market'] == self.MARKET_CHOICES.dam:
base_endpoint = 'hourlylmp/da/final'
elif self.options['market'] == self.MARKET_CHOICES.hourly:
base_endpoint = 'hourlylmp/rt/prelim'
elif self.options['data'] == 'load':
if self.options['market'] == self.MARKET_CHOICES.dam:
base_endpoint = 'hourlyloadforecast'
else:
base_endpoint = 'fiveminutesystemload'
else:
raise ValueError('Data type not recognized %s' % self.options['data'])
# set up storage
request_endpoints = []
# handle dates
if self.options['latest']:
request_endpoints.append('/%s/current%s.json' % (base_endpoint, ext))
elif self.options['start_at'] and self.options['end_at']:
for date in self.dates():
date_str = date.strftime('%Y%m%d')
request_endpoints.append('/%s/day/%s%s.json' % (base_endpoint, date_str, ext))
else:
msg = 'Either latest or forecast must be True, or start_at and end_at must both be provided.'
raise ValueError(msg)
# return
return request_endpoints
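    # Illustration (derived from the branches above, not part of the original
    # source): with options data='gen' and latest=True this returns
    # ['/genfuelmix/current.json']; with start_at/end_at spanning two days it
    # returns one '/genfuelmix/day/YYYYMMDD.json' entry per day.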
def fetch_data(self, endpoint, auth):
url = self.base_url + endpoint
response = self.request(url, auth=auth)
if response:
return response.json()
else:
return {}
def parse_json_load_data(self, data):
"""
        Pull appropriate keys from the JSON data set.
Raise ValueError if parser fails.
"""
try:
if self.options.get('latest'):
return data['FiveMinSystemLoad']
elif self.options['market'] == self.MARKET_CHOICES.dam:
return data['HourlyLoadForecasts']['HourlyLoadForecast']
else:
return data['FiveMinSystemLoads']['FiveMinSystemLoad']
except (KeyError, TypeError):
raise ValueError('Could not parse ISONE load data %s' % data)
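    # Shapes this parser expects (inferred from the key lookups above; the
    # record contents are whatever the ISONE web service returns):
    #   latest:        {'FiveMinSystemLoad': [...]}
    #   dam forecast:  {'HourlyLoadForecasts': {'HourlyLoadForecast': [...]}}
    #   otherwise:     {'FiveMinSystemLoads': {'FiveMinSystemLoad': [...]}}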
def parse_json_lmp_data(self, data):
"""
        Pull appropriate keys from the JSON data set.
Raise ValueError if parser fails.
"""
try:
if self.options['market'] == self.MARKET_CHOICES.fivemin:
if self.options.get('latest'):
return data['FiveMinLmp']
else:
return data['FiveMinLmps']['FiveMinLmp']
else:
return data['HourlyLmps']['HourlyLmp']
except (KeyError, TypeError):
raise ValueError('Could not parse ISONE lmp data %s' % data)
def _parse_json(self, json):
if len(json) == 0:
raise ValueError('No data found for ISONE %s' % self.options)
df = pd.DataFrame(json)
# Get datetimes
df.index = df['BeginDate']
df.index = pd.to_datetime(df.index, utc=True)
df['timestamp'] = df.index
# other attributes
df['ba_name'] = self.NAME
df['market'] = self.options['market']
df['freq'] = self.options['frequency']
# lmp specific
if self.options['data'] == 'lmp':
df.rename(columns={'LmpTotal': 'lmp'}, inplace=True)
df['node_id'] = self.options['node_id']
df['lmp_type'] = 'energy'
# genmix specific
if self.options['data'] == 'gen':
df.rename(columns={'GenMw': 'gen_MW'}, inplace=True)
df['fuel_name'] = df['FuelCategory'].apply(lambda x: self.fuels[x])
# load specific
if self.options['data'] == 'load':
df.rename(columns={'LoadMw': 'load_MW'}, inplace=True)
# drop unwanted columns
df.drop(['BeginDate',
'CongestionComponent', 'EnergyComponent', 'LossComponent', 'Location',
'FuelCategory', 'MarginalFlag', 'FuelCategoryRollup',
'NetLoadMw', 'CreationDate', 'NativeLoad', 'ArdDemand',
],
axis=1, inplace=True, errors='ignore')
return df
def get_lmp(self, node_id='INTERNALHUB', latest=True, start_at=False, end_at=False, **kwargs):
# set args
self.handle_options(data='lmp', latest=latest,
start_at=start_at, end_at=end_at, node_id=node_id, **kwargs)
# get location id
try:
locationid = self.locations[node_id.upper()]
except KeyError:
raise ValueError('No LMP data available for location %s' % node_id)
# set up storage
raw_data = []
# collect raw data
for endpoint in self.request_endpoints(locationid):
# carry out request
data = self.fetch_data(endpoint, self.auth)
# pull out data
try:
raw_data += self.parse_json_lmp_data(data)
except ValueError as e:
LOGGER.warn(e)
continue
# parse and slice
df = self._parse_json(raw_data)
df = self.slice_times(df)
# return
return df.to_dict(orient='record')
def get_morningreport(self, day=None):
"""
Retrieve the morning report
:param str day: Retrieve the Morning Report for a specific day (optional).
format: YYYYMMDD
:rtype: dict
"""
endpoint = "/morningreport/current.json"
if day is not None:
if len(day) != 8:
raise ValueError("The day parameters should be a string with the format YYYYMMDD")
endpoint = "/morningreport/day/%s.json" % day
data = self.fetch_data(endpoint, self.auth)
return data
def get_sevendayforecast(self, day=None):
"""
Retrieve the seven day forecast
:param str day: Retrieve the Seven Day Forecast for a specific day (optional).
format: YYYYMMDD
:rtype: dict
"""
endpoint = "/sevendayforecast/current.json"
if day is not None:
if len(day) != 8:
raise ValueError("The day parameters should be a string with the format YYYYMMDD")
endpoint = "/sevendayforecast/day/%s.json" % day
data = self.fetch_data(endpoint, self.auth)
return data
|
apache-2.0
|
ssaeger/scikit-learn
|
examples/applications/face_recognition.py
|
48
|
5691
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset:
================== ============ ======= ========== =======
precision recall f1-score support
================== ============ ======= ========== =======
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
================== ============ ======= ========== =======
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
|
bsd-3-clause
|
googlearchive/rgc-models
|
response_model/python/population_subunits/coarse/analysis/whole_population_fixed_tf_analyse_relu_window.py
|
1
|
17228
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Analyse the results of subunit fitting.
"""
import sys
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
import re # regular expression matching
FLAGS = flags.FLAGS
flags.DEFINE_float('lam_w', 0.0001, 'sparsity regularization of w')
flags.DEFINE_float('lam_a', 0.0001, 'sparsity regularization of a')
flags.DEFINE_integer('ratio_SU', 7, 'ratio of subunits/cells')
flags.DEFINE_float('su_grid_spacing', 3, 'grid spacing')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_float('eta_w', 1e-3, 'learning rate for optimization functions')
flags.DEFINE_float('eta_a', 1e-2, 'learning rate for optimization functions')
flags.DEFINE_float('bias_init_scale', -1, 'bias initialized at scale*std')
flags.DEFINE_string('model_id', 'relu_window', 'which model to learn?');
flags.DEFINE_string('save_location',
'/home/bhaishahster/',
'where to store logs and outputs?');
flags.DEFINE_string('data_location',
'/home/bhaishahster/data_breakdown/',
'where to take data from?')
flags.DEFINE_integer('batchsz', 100, 'batch size for training')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks') # should be 216
flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data')
flags.DEFINE_integer('window', 3, 'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 3, 'stride for relu_window')
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
def main(argv):
#plt.ion() # interactive plotting
window = FLAGS.window
n_pix = (2* window + 1) ** 2
dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
nCells = 107
# load model
# load filename
print(FLAGS.model_id)
with tf.Session() as sess:
if FLAGS.model_id == 'relu':
# lam_c(X) = sum_s(a_cs relu(k_s.x)) , a_cs>0
short_filename = ('data_model=' + str(FLAGS.model_id) +
'_lam_w=' + str(FLAGS.lam_w) +
'_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
'_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
w = tf.Variable(np.array(np.random.randn(3200,749), dtype='float32'))
a = tf.Variable(np.array(np.random.randn(749,107), dtype='float32'))
if FLAGS.model_id == 'relu_window':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32')) # exp 5
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'relu_window_mother':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'relu_window_mother_sfm':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'relu_window_mother_sfm_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'relu_window_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w = tf.Variable(np.array(0.01+ 0.005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
a = tf.Variable(np.array(0.02+np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'relu_window_mother_exp':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'relu_window_a_support':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w = tf.Variable(np.array(0.001+ 0.0005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
a = tf.Variable(np.array(0.002*np.random.rand(dimx*dimy, nCells),dtype='float32'))
if FLAGS.model_id == 'exp_window_a_support':
short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
w = tf.Variable(np.array(0.001+ 0.0005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
a = tf.Variable(np.array(0.002*np.random.rand(dimx*dimy, nCells),dtype='float32'))
parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
FLAGS.save_location = parent_folder +short_filename + '/'
# get relevant files
file_list = gfile.ListDirectory(FLAGS.save_location)
save_filename = FLAGS.save_location + short_filename
print('\nLoading: ', save_filename)
bin_files = []
meta_files = []
for file_n in file_list:
if re.search(short_filename + '.', file_n):
if re.search('.meta', file_n):
meta_files += [file_n]
else:
bin_files += [file_n]
#print(bin_files)
print(len(meta_files), len(bin_files), len(file_list))
# get iteration numbers
iterations = np.array([])
for file_name in bin_files:
try:
iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1]))
except:
print('Could not load filename: ' + file_name)
iterations.sort()
print(iterations)
iter_plot = iterations[-1]
print(int(iter_plot))
# load tensorflow variables
saver_var = tf.train.Saver(tf.all_variables())
restore_file = save_filename + '-' + str(int(iter_plot))
saver_var.restore(sess, restore_file)
# plot subunit - cell connections
plt.figure()
plt.cla()
plt.imshow(a.eval(), cmap='gray', interpolation='nearest')
print(np.shape(a.eval()))
plt.title('Iteration: ' + str(int(iter_plot)))
plt.show()
plt.draw()
# plot all subunits on 40x80 grid
try:
wts = w.eval()
for isu in range(100):
fig = plt.subplot(10, 10, isu+1)
plt.imshow(np.reshape(wts[:, isu],[40, 80]), interpolation='nearest', cmap='gray')
plt.title('Iteration: ' + str(int(iter_plot)))
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
except:
print('w full does not exist? ')
# plot a few subunits - wmother + wdel
try:
wts = w.eval()
print('wts shape:', np.shape(wts))
icnt=1
for idimx in np.arange(dimx):
for idimy in np.arange(dimy):
fig = plt.subplot(dimx, dimy, icnt)
plt.imshow(np.reshape(np.squeeze(wts[idimx, idimy, :]), (2*window+1,2*window+1)), interpolation='nearest', cmap='gray')
icnt = icnt+1
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.show()
plt.draw()
except:
print('w does not exist?')
# plot wmother
try:
w_mot = np.squeeze(w_mother.eval())
print(w_mot)
plt.imshow(w_mot, interpolation='nearest', cmap='gray')
plt.title('Mother subunit')
plt.show()
plt.draw()
except:
print('w mother does not exist')
# plot wmother + wdel
try:
w_mot = np.squeeze(w_mother.eval())
w_del = np.squeeze(w_del.eval())
wts = np.array(np.random.randn(dimx, dimy, (2*window +1)**2))
for idimx in np.arange(dimx):
print(idimx)
for idimy in np.arange(dimy):
wts[idimx, idimy, :] = np.ndarray.flatten(w_mot) + w_del[idimx, idimy, :]
except:
print('w mother + w delta do not exist? ')
'''
try:
icnt=1
for idimx in np.arange(dimx):
for idimy in np.arange(dimy):
fig = plt.subplot(dimx, dimy, icnt)
plt.imshow(np.reshape(np.squeeze(wts[idimx, idimy, :]), (2*window+1,2*window+1)), interpolation='nearest', cmap='gray')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
except:
print('w mother + w delta plotting error? ')
# plot wdel
try:
w_del = np.squeeze(w_del.eval())
icnt=1
for idimx in np.arange(dimx):
for idimy in np.arange(dimy):
fig = plt.subplot(dimx, dimy, icnt)
plt.imshow( np.reshape(w_del[idimx, idimy, :], (2*window+1,2*window+1)), interpolation='nearest', cmap='gray')
icnt = icnt+1
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
except:
print('w delta do not exist? ')
plt.suptitle('Iteration: ' + str(int(iter_plot)))
plt.show()
plt.draw()
'''
# select a cell, and show its subunits.
#try:
## Load data summary, get mask
filename = FLAGS.data_location + 'data_details.mat'
summary_file = gfile.Open(filename, 'r')
data_summary = sio.loadmat(summary_file)
total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
stas = data_summary['stas']
print(np.shape(total_mask))
# a is 2D
a_eval = a.eval()
print(np.shape(a_eval))
# get softmax numpy
if FLAGS.model_id == 'relu_window_mother_sfm' or FLAGS.model_id == 'relu_window_mother_sfm_exp':
b = np.exp(a_eval) / np.sum(np.exp(a_eval),0)
else:
b = a_eval
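    # For the softmax models each column of b sums to 1, i.e. the subunit
    # weights for every cell form a probability distribution over subunits.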
plt.figure();
plt.imshow(b, interpolation='nearest', cmap='gray')
plt.show()
plt.draw()
# plot subunits for multiple cells.
n_cells = 10
n_plots_max = 20
plt.figure()
for icell_cnt, icell in enumerate(np.arange(n_cells)):
mask2D = np.reshape(total_mask[icell,: ], [40, 80])
nz_idx = np.nonzero(mask2D)
np.shape(nz_idx)
print(nz_idx)
ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
icnt = -1
a_thr = np.percentile(np.abs(b[:, icell]), 99.5)
n_plots = np.sum(np.abs(b[:, icell]) > a_thr)
nx = np.ceil(np.sqrt(n_plots)).astype('int')
ny = np.ceil(np.sqrt(n_plots)).astype('int')
ifig=0
ww_sum = np.zeros((40,80))
for idimx in np.arange(dimx):
for idimy in np.arange(dimy):
icnt = icnt + 1
if(np.abs(b[icnt,icell]) > a_thr):
ifig = ifig + 1
fig = plt.subplot(n_cells, n_plots_max, icell_cnt*n_plots_max + ifig + 2)
ww = np.zeros((40,80))
ww[idimx*FLAGS.stride: idimx*FLAGS.stride + (2*window+1),
idimy*FLAGS.stride: idimy*FLAGS.stride + (2*window+1)] = b[icnt, icell] * (np.reshape(wts[idimx, idimy, :],
(2*window+1,2*window+1)))
plt.imshow(ww, interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
plt.title(b[icnt,icell])
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ww_sum = ww_sum + ww
fig = plt.subplot(n_cells, n_plots_max, icell_cnt*n_plots_max + 2)
plt.imshow(ww_sum, interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.title('STA from model')
fig = plt.subplot(n_cells, n_plots_max, icell_cnt*n_plots_max + 1)
plt.imshow(np.reshape(stas[:, icell], [40, 80]), interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.title('True STA')
plt.show()
plt.draw()
#except:
# print('a not 2D?')
# using xlim and ylim, and plot the 'windows' which are relevant with their weights
  sq_flat = np.zeros((dimx, dimy), dtype='int')  # int dtype so entries can be used as indices below
icnt = 0
for idimx in np.arange(dimx):
for idimy in np.arange(dimy):
sq_flat[idimx, idimy] = icnt
icnt = icnt + 1
n_cells = 1
n_plots_max = 10
plt.figure()
for icell_cnt, icell in enumerate(np.array([1, 2, 3, 4, 5])):#enumerate(np.arange(n_cells)):
a_thr = np.percentile(np.abs(b[:, icell]), 99.5)
mask2D = np.reshape(total_mask[icell,: ], [40, 80])
nz_idx = np.nonzero(mask2D)
np.shape(nz_idx)
print(nz_idx)
ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
print(xlim, ylim)
    win_startx = np.ceil((xlim[0] - (2*window+1)) / FLAGS.stride).astype('int')
    win_endx = np.floor((xlim[1]-1) / FLAGS.stride).astype('int')
    win_starty = np.ceil((ylim[0] - (2*window+1)) / FLAGS.stride).astype('int')
    win_endy = np.floor((ylim[1]-1) / FLAGS.stride).astype('int')
dimx_plot = win_endx - win_startx + 1
dimy_plot = win_endy - win_starty + 1
ww_sum = np.zeros((40,80))
for irow, idimy in enumerate(np.arange(win_startx, win_endx+1)):
for icol, idimx in enumerate(np.arange(win_starty, win_endy+1)):
fig = plt.subplot(dimx_plot+1, dimy_plot, (irow + 1) * dimy_plot + icol+1 )
ww = np.zeros((40,80))
ww[idimx*FLAGS.stride: idimx*FLAGS.stride + (2*window+1),
idimy*FLAGS.stride: idimy*FLAGS.stride + (2*window+1)] = (np.reshape(wts[idimx, idimy, :],
(2*window+1,2*window+1)))
plt.imshow(ww, interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
if b[sq_flat[idimx, idimy],icell] > a_thr:
plt.title(b[sq_flat[idimx, idimy],icell], fontsize=10, color='g')
else:
plt.title(b[sq_flat[idimx, idimy],icell], fontsize=10, color='r')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
ww_sum = ww_sum + ww * b[sq_flat[idimx, idimy],icell]
fig = plt.subplot(dimx_plot+1, dimy_plot, 2)
plt.imshow(ww_sum, interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.title('STA from model')
fig = plt.subplot(dimx_plot+1, dimy_plot, 1)
plt.imshow(np.reshape(stas[:, icell], [40, 80]), interpolation='nearest', cmap='gray')
plt.ylim(ylim)
plt.xlim(xlim)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.title('True STA')
plt.show()
plt.draw()
if __name__ == '__main__':
app.run()
|
apache-2.0
|
chenjun0210/tensorflow
|
tensorflow/examples/learn/multiple_gpu.py
|
49
|
3078
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
  # each layer having a dropout probability of 0.5 (keep_prob=0.5).
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
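# --- Illustration (not part of the original example) -------------------------
# A plain-Python sketch of the one-hot encoding that tf.one_hot performs inside
# my_model above, for depth 3 (the three iris classes). It is illustrative only,
# uses no TensorFlow, and is not called anywhere in this example.
def _one_hot_sketch(labels, depth=3):
  """E.g. _one_hot_sketch([0, 2]) -> [[1, 0, 0], [0, 0, 1]]."""
  return [[1 if i == label else 0 for i in range(depth)] for label in labels]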
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
sonnyhu/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
42
|
14316
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    # This tests that R / python give the same result up to column
    # sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
    # We test the scale=True parameter
    # This also allows us to check numerical stability across platforms
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
    # Test with X2, Y2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
    # This tests the robustness of the algorithm when dealing with values close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
bsd-3-clause
|
luispedro/BuildingMachineLearningSystemsWithPython
|
ch02/figure1.py
|
22
|
1199
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from matplotlib import pyplot as plt
# We load the data with load_iris from sklearn
from sklearn.datasets import load_iris
# load_iris returns an object with several fields
data = load_iris()
features = data.data
feature_names = data.feature_names
target = data.target
target_names = data.target_names
fig,axes = plt.subplots(2, 3)
pairs = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
# Set up 3 different pairs of (color, marker)
color_markers = [
('r', '>'),
('g', 'o'),
('b', 'x'),
]
for i, (p0, p1) in enumerate(pairs):
ax = axes.flat[i]
for t in range(3):
# Use a different color/marker for each class `t`
c,marker = color_markers[t]
ax.scatter(features[target == t, p0], features[
target == t, p1], marker=marker, c=c)
ax.set_xlabel(feature_names[p0])
ax.set_ylabel(feature_names[p1])
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
fig.savefig('figure1.png')
|
mit
|
aledionigi/trading-with-python
|
historicDataDownloader/historicDataDownloader.py
|
77
|
4526
|
'''
Created on 4 aug. 2012
Copyright: Jev Kuznetsov
License: BSD
a module for downloading historic data from IB
'''
import ib
import pandas
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from time import sleep
import tradingWithPython.lib.logger as logger
from pandas import DataFrame, Index
import datetime as dt
from timeKeeper import TimeKeeper
import time
timeFormat = "%Y%m%d %H:%M:%S"
class DataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = DataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1800 S',barSizeSetting='1 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(1)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self,contract, dateTuple ):
''' get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
'''
openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime,closeTime,freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract,t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
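# --- Illustration (not part of the original module) --------------------------
# A minimal sketch of the pacing rule enforced in Downloader.requestData above:
# the downloader waits while more than 59 historical-data requests fall inside a
# 600 second window. The helper and its argument names are hypothetical and it
# is not used by the module.
def _should_wait(request_times, now, span=600, limit=59):
    """Return True if more than `limit` requests happened in the last `span` seconds."""
    recent = [t for t in request_times if now - t < span]
    return len(recent) > limit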
if __name__=='__main__':
dl = Downloader(debug=True)
c = Contract()
c.m_symbol = 'SPY'
c.m_secType = 'STK'
c.m_exchange = 'SMART'
c.m_currency = 'USD'
df = dl.getIntradayData(c, (2012,8,6))
df.to_csv('test.csv')
# df = dl.requestData(c, '20120803 22:00:00')
# df.to_csv('test1.csv')
# df = dl.requestData(c, '20120803 21:30:00')
# df.to_csv('test2.csv')
dl.disconnect()
print 'Done.'
|
bsd-3-clause
|
awicenec/wzfp
|
src/digital_noise_image.py
|
1
|
5340
|
#!/usr/local/bin/python2.7
# encoding: utf-8
'''
digital_noise_image -- Script produces a digital noise image from DP and SP rounding differences
digital_noise_image is a main program using numpy and matplotlib
It defines classes_and_methods
@author: [email protected]
@copyright: 2015 ICRAR/UWA. All rights reserved.
@license: Apache
@contact: [email protected]
@deffield updated: Updated
'''
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
__all__ = []
__version__ = 0.1
__date__ = '2015-09-15'
__updated__ = '2015-09-15'
DEBUG = 0
TESTRUN = 0
PROFILE = 0
class CLIError(Exception):
'''Generic exception to raise and log different fatal errors.'''
def __init__(self, msg):
super(CLIError).__init__(type(self))
self.msg = "E: %s" % msg
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
def img_rounding_noise(psize=100):
"""
Generate digital noise image from difference of SP and DP arrays.
The SP array is converted to DP and then the difference is calculated.
This effect is due to rounding differences between SP and DP.
INPUTS:
psize: int, size of image in pixels/side
RETURNS:
the image as a numpy array (psize,psize).dtype=np.float64
"""
na=np.array(np.random.sample(psize**2)*100,dtype=np.int8) # random integers [0..100[
r32 = np.array(na, dtype=np.float32)/np.pi # produce SP array
r64 = np.array(na, dtype=np.float64)/np.pi # produce DP array
r32_64=np.array(r32,dtype=np.float64) # convert SP array to DP
d64 = r64 - r32_64 # difference between the two (should be == 0!)
d32 = np.array(d64,dtype=np.float32)
img = d32.reshape(psize,psize) # make 2-D
img += np.abs(img.min()) # shift to all positive
img = img/np.max(img) # normalise to maximum difference
return img
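# --- Illustration (not part of the original script) --------------------------
# A minimal sketch of the effect exploited by img_rounding_noise() above:
# dividing the same integer by pi in single and in double precision gives
# slightly different results, and that residual is what the image is built
# from. The helper is illustrative only and is not called by the script.
def _rounding_residual(value=42):
    """Return the DP-minus-SP residual of value/pi for a single integer."""
    sp = np.float32(value) / np.float32(np.pi) # single precision quotient
    dp = np.float64(value) / np.float64(np.pi) # double precision quotient
    return dp - np.float64(sp) # tiny, but generally non-zero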
def main(argv=None):
'''Command line options.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by [email protected] on %s.
Copyright 2015 ICRAR/UWA. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-s", "--size", type=int, dest="psize", default=100, help="size of image pixels/side [default: %(default)s]")
parser.add_argument("-v", "--verbose", dest="verbose", default=0, action="count", help="set verbosity level [default: %(default)s]")
parser.add_argument("-m", "--method", dest="method", default='img_rounding_noise', help="set method to be used [default: %(default)s]")
parser.add_argument("-l", "--list", dest="lm", action="count", default=False, help="list methods [default: %(default)s]")
parser.add_argument('-V', '--version', action='version', version=program_version_message)
# parser.add_argument(dest="paths", help="paths to folder(s) with source file(s) [default: %(default)s]", metavar="path", nargs='+')
# Process arguments
args = parser.parse_args()
psize = args.psize
verbose = args.verbose
method = args.method
if verbose > 0:
print("Verbose mode on")
if psize:
print("Pixel size: %d" % psize)
else:
print("Default size 100")
if args.lm:
methods = filter(lambda x:x[0:4]=='img_',globals().keys())
for m in methods:
                print globals()[m].__doc__
return 0
img_producer = globals()[method]
img = img_producer(psize=psize)
plt.imshow(img)
plt.show()
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception, e:
if DEBUG or TESTRUN:
raise(e)
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
return 2
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-v")
if TESTRUN:
import doctest
doctest.testmod()
if PROFILE:
import cProfile
import pstats
profile_filename = 'digital_noise_image_profile.txt'
cProfile.run('main()', profile_filename)
statsfile = open("profile_stats.txt", "wb")
p = pstats.Stats(profile_filename, stream=statsfile)
stats = p.strip_dirs().sort_stats('cumulative')
stats.print_stats()
statsfile.close()
sys.exit(0)
sys.exit(main())
|
lgpl-3.0
|
jaidevd/scikit-learn
|
sklearn/neighbors/regression.py
|
26
|
10999
|
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
    - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
        neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
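# --- Illustration (not part of the original module) --------------------------
# A minimal numpy sketch of the weights='distance' case handled in predict()
# above: each neighbor's target is weighted by the inverse of its distance.
# Array names and shapes are hypothetical, and the real weighting code also has
# to deal with zero distances, which this sketch ignores.
def _distance_weighted_mean_sketch(neigh_dist, neigh_targets):
    """neigh_dist, neigh_targets: arrays of shape (n_queries, n_neighbors)."""
    weights = 1.0 / neigh_dist
    return np.sum(neigh_targets * weights, axis=1) / np.sum(weights, axis=1)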
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
    - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
|
bsd-3-clause
|
sureshthalamati/spark
|
python/pyspark/sql/functions.py
|
2
|
89444
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import math
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
from pyspark.sql.udf import UserDefinedFunction, _create_udf
def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# users might write ints for simplicity. This would throw an error on the JVM side.
jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1),
col2._jc if isinstance(col2, Column) else float(col2))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_functions = {
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
'upper': 'Converts a string expression to upper case.',
    'lower': 'Converts a string expression to lower case.',
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4 = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6 = {
    # statistical and aggregate functions
'stddev': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: returns the population variance of the values in a group.',
'var_samp': 'Aggregate function: returns the unbiased variance of the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1 = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
    place and that the next person came in third. Rank would give sequential numbers, so
    the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
    place and that the next person came in third. Rank would give sequential numbers, so
    the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
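# --- Illustration (not part of the original module) --------------------------
# A plain-Python sketch of the rank / dense_rank distinction described in the
# docstrings above, assuming an already-sorted list of scores. It touches no
# Spark machinery and is not used elsewhere in this module.
def _rank_vs_dense_rank_sketch(sorted_scores):
    """E.g. [90, 85, 85, 80] -> ([1, 2, 2, 4], [1, 2, 2, 3])."""
    ranks, dense = [], []
    for i, s in enumerate(sorted_scores):
        if i and s == sorted_scores[i - 1]:
            ranks.append(ranks[-1])  # ties share the same rank
            dense.append(dense[-1])
        else:
            ranks.append(i + 1)  # rank counts the skipped positions
            dense.append(dense[-1] + 1 if dense else 1)  # dense_rank does not
    return ranks, dense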
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
'toDegrees': 'Deprecated in 2.1, use degrees instead.',
'toRadians': 'Deprecated in 2.1, use radians instead.',
}
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6.items():
globals()[_name] = since(1.6)(_create_function(_name, _doc))
for _name, _doc in _functions_2_1.items():
globals()[_name] = since(2.1)(_create_function(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
del _name, _doc
@since(1.3)
def approxCountDistinct(col, rsd=None):
"""
.. note:: Deprecated in 2.1, use :func:`approx_count_distinct` instead.
"""
warnings.warn("Deprecated in 2.1, use approx_count_distinct instead.", DeprecationWarning)
return approx_count_distinct(col, rsd)
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1`` and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
    The function by default returns the first value it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
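# --- Illustration (not part of the original module) --------------------------
# A plain-Python sketch of the bit formula quoted in the grouping_id docstring
# above: each grouping column contributes one bit, most significant first. The
# helper is illustrative only and is not used by this module.
def _grouping_id_sketch(grouping_bits):
    """E.g. [1, 0] -> (1 << 1) + 0 == 2."""
    gid = 0
    for bit in grouping_bits:
        gid = (gid << 1) + bit
    return gid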
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
    The function by default returns the last value it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
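# --- Illustration (not part of the original module) --------------------------
# A rough sketch of the ID layout described in the docstring above, assuming the
# stated split: partition ID in the upper bits, record number in the lower 33
# bits. The helper and its names are hypothetical.
def _monotonic_id_sketch(partition_id, record_number):
    """E.g. partition 1, record 0 -> 1 << 33 == 8589934592, as in the doctest."""
    return (partition_id << 33) + record_number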
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=1.1568609015300986),
Row(age=5, name=u'Bob', rand=1.403379671529166)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=-0.7556247885860078),
Row(age=5, name=u'Bob', randn=-0.0861619008451133)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
.. note:: This is indeterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, count=1, default=None):
"""
Window function: returns the value that is `count` rows before the current row, and
`default` if there are fewer than `count` rows before the current row. For example,
a `count` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
:param count: number of rows to look back in the partition
:param default: default value to return when there is no row at the requested offset
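A hedged sketch of typical use over an ordered window (``df`` and the column names here are
illustrative; skipped, output not shown):
>>> from pyspark.sql import Window # doctest: +SKIP
>>> w = Window.partitionBy("k").orderBy("v") # doctest: +SKIP
>>> df.select("k", "v", lag("v", 1).over(w).alias("prev_v")).collect() # doctest: +SKIP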
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), count, default))
@since(1.4)
def lead(col, count=1, default=None):
"""
Window function: returns the value that is `count` rows after the current row, and
`default` if there are fewer than `count` rows after the current row. For example,
a `count` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
:param count: number of rows to look ahead in the partition
:param default: default value to return when there is no row at the requested offset
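A hedged sketch of typical use over an ordered window (``df`` and the column names here are
illustrative; skipped, output not shown):
>>> from pyspark.sql import Window # doctest: +SKIP
>>> w = Window.partitionBy("k").orderBy("v") # doctest: +SKIP
>>> df.select("k", "v", lead("v", 1).over(w).alias("next_v")).collect() # doctest: +SKIP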
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), count, default))
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
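A hedged sketch of typical use over an ordered window (``df`` and the column names here are
illustrative; skipped, output not shown):
>>> from pyspark.sql import Window # doctest: +SKIP
>>> w = Window.partitionBy("k").orderBy("v") # doctest: +SKIP
>>> df.select("k", "v", ntile(4).over(w).alias("quartile")).collect() # doctest: +SKIP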
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
@since(2.4)
def unboundedPreceding():
"""
Window function: returns the special frame boundary that represents the first row
in the window partition.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unboundedPreceding())
@since(2.4)
def unboundedFollowing():
"""
Window function: returns the special frame boundary that represents the last row
in the window partition.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unboundedFollowing())
@since(2.4)
def currentRow():
"""
Window function: returns the special frame boundary that represents the current row
in the window partition.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.currentRow())
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
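A minimal, skipped sketch (the returned value depends on when it runs):
>>> spark.range(1).select(current_date().alias("today")).collect() # doctest: +SKIP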
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
@since(1.5)
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
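A minimal, skipped sketch (the returned value depends on when it runs):
>>> spark.range(1).select(current_timestamp().alias("now")).collect() # doctest: +SKIP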
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.text.SimpleDateFormat` can be used.
.. note:: Whenever possible, use specialized functions like `year`, as they benefit from a
specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2):
"""
Returns the number of months between date1 and date2.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.9495967...)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(_to_java_column(date1), _to_java_column(date2)))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`SimpleDateFormats <http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html>`_.
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.DateType` into :class:`pyspark.sql.types.TimestampType`
using the optionally specified format. Specify formats according to
`SimpleDateFormats <http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html>`_.
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column and falls on
the day of the week given by `dayOfWeek`. The day-of-week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Convert time string with given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to Unix time stamp (in seconds), using the default timezone and the default
locale, returning null if the conversion fails.
If `timestamp` is None, then it returns the current timestamp.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders
that time as a timestamp in the given time zone. For example, 'GMT+1' would yield
'2017-07-14 03:40:00.0'.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(from_utc_timestamp(df.t, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time
zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield
'2017-07-14 01:40:00.0'.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['ts'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'initcap': 'Returns a new string column by converting the first letter of each word to ' +
'uppercase. Words are delimited by whitespace.',
'lower': 'Converts a string column to lower case.',
'upper': 'Converts a string column to upper case.',
'reverse': 'Reverses the string column and returns it as a new string column.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function(_name, _doc))
del _name, _doc
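# A hedged illustration (not executed here) of the wrappers generated above, e.g. the
# ``trim``/``lower`` helpers; ``spark`` and the column name are assumed for the sketch:
#   spark.createDataFrame([('  Foo  ',)], ['s']).select(lower(trim('s')).alias('v'))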
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
If all inputs are binary, concat returns an output as binary. Otherwise, it returns as string.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Computes the first argument into a string from a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
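A hedged sketch (assuming the shared ``spark`` session; skipped, output not shown):
>>> df = spark.createDataFrame([(bytearray(b"abcd"),)], ["b"]) # doctest: +SKIP
>>> df.select(decode("b", "UTF-8").alias("s")).collect() # doctest: +SKIP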
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Computes the first argument into a binary from a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
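A hedged sketch (assuming the shared ``spark`` session; skipped, output not shown):
>>> df = spark.createDataFrame([("abcd",)], ["s"]) # doctest: +SKIP
>>> df.select(encode("s", "UTF-8").alias("b")).collect() # doctest: +SKIP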
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
:param col: the column name of the numeric value to be formatted
:param d: the number of decimal places to round to
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and is used as the result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments are null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left) is
returned. If count is negative, everything to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (zero based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern):
"""
Splits str around pattern (pattern is a regular expression).
.. note:: `pattern` is a string representing a regular expression.
>>> df = spark.createDataFrame([('ab12cd',)], ['s',])
>>> df.select(split(df.s, '[0-9]+').alias('s')).collect()
[Row(s=[u'ab', u'cd'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', '(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', '(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', '(\\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string.
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(1.4)
def explode(col):
"""Returns a new row for each element in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""Returns a new row for each element with position in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`StructType` or :class:`ArrayType`
of :class:`StructType`\\s with the specified schema. Returns `null`, in the case of an
unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. accepts the same options as the json datasource
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, options)
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` of
:class:`StructType`\\s, a :class:`MapType` or :class:`ArrayType` of :class:`MapType`\\s
into a JSON string. Throws an exception, in the case of an unsupported type.
:param col: name of column containing the struct, array of the structs, the map or
array of the maps.
:param options: options to control converting. accepts the same options as the json datasource
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), options)
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions; they end up being evaluated for all rows internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions themselves.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The returnType should be a primitive data type, e.g., :class:`DoubleType`.
The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, this length can be used, for example, to size each returned
`pandas.Series`, but it cannot be used to infer the length of the whole column.
2. GROUPED_MAP
A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key will be passed as the first argument and the data will
be passed as the second argument. The grouping key will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key in the function.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> import pandas as pd # doctest: +SKIP
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
3. GROUPED_AGG
    A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar.
    The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
    The returned scalar can be either a Python primitive type, e.g., `int` or `float`,
    or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`ArrayType`, :class:`MapType` and :class:`StructType` are currently not supported as
output types.
    Grouped aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
.. seealso:: :meth:`pyspark.sql.GroupedData.agg`
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
    ...     return pd.Series(np.random.randn(len(v)))
>>> random = random.asNondeterministic() # doctest: +SKIP
    .. note:: The user-defined functions do not support conditional expressions or short
        circuiting in boolean expressions; all expressions end up being evaluated internally.
        If the functions can fail on special rows, the workaround is to incorporate the
        condition into the functions themselves, as in the sketch below.
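    For example, a minimal illustrative sketch (not part of the API) that folds a zero check
    into the UDF instead of relying on short circuiting in the surrounding expression:

    >>> @pandas_udf("double", PandasUDFType.SCALAR)  # doctest: +SKIP
    ... def safe_reciprocal(v):
    ...     # guard against division by zero inside the UDF itself
    ...     return (1.0 / v).where(v != 0, 0.0)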
"""
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
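# Export every public, lowercase callable in this module's namespace,
# except the names listed in the blacklist below.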
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/indexes/interval/test_interval_new.py
|
2
|
10996
|
from __future__ import division
import numpy as np
import pytest
from pandas import Int64Index, Interval, IntervalIndex
import pandas.util.testing as tm
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
class TestIntervalIndex(object):
@pytest.mark.parametrize("side", ['right', 'left', 'both', 'neither'])
def test_get_loc_interval(self, closed, side):
idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
for bound in [[0, 1], [1, 2], [2, 3], [3, 4],
[0, 2], [2.5, 3], [-1, 4]]:
# if get_loc is supplied an interval, it should only search
# for exact matches, not overlaps or covers, else KeyError.
if closed == side:
if bound == [0, 1]:
assert idx.get_loc(Interval(0, 1, closed=side)) == 0
elif bound == [2, 3]:
assert idx.get_loc(Interval(2, 3, closed=side)) == 1
else:
with pytest.raises(KeyError):
idx.get_loc(Interval(*bound, closed=side))
else:
with pytest.raises(KeyError):
idx.get_loc(Interval(*bound, closed=side))
@pytest.mark.parametrize("scalar", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5])
def test_get_loc_scalar(self, closed, scalar):
# correct = {side: {query: answer}}.
# If query is not in the dict, that query should raise a KeyError
correct = {'right': {0.5: 0, 1: 0, 2.5: 1, 3: 1},
'left': {0: 0, 0.5: 0, 2: 1, 2.5: 1},
'both': {0: 0, 0.5: 0, 1: 0, 2: 1, 2.5: 1, 3: 1},
'neither': {0.5: 0, 2.5: 1}}
idx = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
# if get_loc is supplied a scalar, it should return the index of
# the interval which contains the scalar, or KeyError.
if scalar in correct[closed].keys():
assert idx.get_loc(scalar) == correct[closed][scalar]
else:
pytest.raises(KeyError, idx.get_loc, scalar)
def test_slice_locs_with_interval(self):
# increasing monotonically
index = IntervalIndex.from_tuples([(0, 2), (1, 3), (2, 4)])
assert index.slice_locs(
start=Interval(0, 2), end=Interval(2, 4)) == (0, 3)
assert index.slice_locs(start=Interval(0, 2)) == (0, 3)
assert index.slice_locs(end=Interval(2, 4)) == (0, 3)
assert index.slice_locs(end=Interval(0, 2)) == (0, 1)
assert index.slice_locs(
start=Interval(2, 4), end=Interval(0, 2)) == (2, 1)
# decreasing monotonically
index = IntervalIndex.from_tuples([(2, 4), (1, 3), (0, 2)])
assert index.slice_locs(
start=Interval(0, 2), end=Interval(2, 4)) == (2, 1)
assert index.slice_locs(start=Interval(0, 2)) == (2, 3)
assert index.slice_locs(end=Interval(2, 4)) == (0, 1)
assert index.slice_locs(end=Interval(0, 2)) == (0, 3)
assert index.slice_locs(
start=Interval(2, 4), end=Interval(0, 2)) == (0, 3)
# sorted duplicates
index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)])
assert index.slice_locs(
start=Interval(0, 2), end=Interval(2, 4)) == (0, 3)
assert index.slice_locs(start=Interval(0, 2)) == (0, 3)
assert index.slice_locs(end=Interval(2, 4)) == (0, 3)
assert index.slice_locs(end=Interval(0, 2)) == (0, 2)
assert index.slice_locs(
start=Interval(2, 4), end=Interval(0, 2)) == (2, 2)
# unsorted duplicates
index = IntervalIndex.from_tuples([(0, 2), (2, 4), (0, 2)])
        pytest.raises(KeyError, index.slice_locs,
                      start=Interval(0, 2), end=Interval(2, 4))
        pytest.raises(KeyError, index.slice_locs, start=Interval(0, 2))
        assert index.slice_locs(end=Interval(2, 4)) == (0, 2)
        pytest.raises(KeyError, index.slice_locs, end=Interval(0, 2))
        pytest.raises(KeyError, index.slice_locs,
                      start=Interval(2, 4), end=Interval(0, 2))
# another unsorted duplicates
index = IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4), (1, 3)])
assert index.slice_locs(
start=Interval(0, 2), end=Interval(2, 4)) == (0, 3)
assert index.slice_locs(start=Interval(0, 2)) == (0, 4)
assert index.slice_locs(end=Interval(2, 4)) == (0, 3)
assert index.slice_locs(end=Interval(0, 2)) == (0, 2)
assert index.slice_locs(
start=Interval(2, 4), end=Interval(0, 2)) == (2, 2)
def test_slice_locs_with_ints_and_floats_succeeds(self):
# increasing non-overlapping
index = IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)])
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0, 3) == (0, 2)
assert index.slice_locs(3, 1) == (2, 1)
assert index.slice_locs(3, 4) == (2, 3)
assert index.slice_locs(0, 4) == (0, 3)
# decreasing non-overlapping
index = IntervalIndex.from_tuples([(3, 4), (1, 2), (0, 1)])
assert index.slice_locs(0, 1) == (3, 2)
assert index.slice_locs(0, 2) == (3, 1)
assert index.slice_locs(0, 3) == (3, 1)
assert index.slice_locs(3, 1) == (1, 2)
assert index.slice_locs(3, 4) == (1, 0)
assert index.slice_locs(0, 4) == (3, 0)
@pytest.mark.parametrize("query", [
[0, 1], [0, 2], [0, 3], [3, 1], [3, 4], [0, 4]])
@pytest.mark.parametrize("tuples", [
[(0, 2), (1, 3), (2, 4)], [(2, 4), (1, 3), (0, 2)],
[(0, 2), (0, 2), (2, 4)], [(0, 2), (2, 4), (0, 2)],
[(0, 2), (0, 2), (2, 4), (1, 3)]])
def test_slice_locs_with_ints_and_floats_errors(self, tuples, query):
index = IntervalIndex.from_tuples(tuples)
with pytest.raises(KeyError):
            index.slice_locs(*query)
@pytest.mark.parametrize('query, expected', [
([Interval(1, 3, closed='right')], [1]),
([Interval(1, 3, closed='left')], [-1]),
([Interval(1, 3, closed='both')], [-1]),
([Interval(1, 3, closed='neither')], [-1]),
([Interval(1, 4, closed='right')], [-1]),
([Interval(0, 4, closed='right')], [-1]),
([Interval(1, 2, closed='right')], [-1]),
([Interval(2, 4, closed='right'), Interval(1, 3, closed='right')],
[2, 1]),
([Interval(1, 3, closed='right'), Interval(0, 2, closed='right')],
[1, -1]),
([Interval(1, 3, closed='right'), Interval(1, 3, closed='left')],
[1, -1])])
def test_get_indexer_with_interval(self, query, expected):
tuples = [(0, 2.5), (1, 3), (2, 4)]
index = IntervalIndex.from_tuples(tuples, closed='right')
result = index.get_indexer(query)
expected = np.array(expected, dtype='intp')
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('query, expected', [
([-0.5], [-1]),
([0], [-1]),
([0.5], [0]),
([1], [0]),
([1.5], [1]),
([2], [1]),
([2.5], [-1]),
([3], [-1]),
([3.5], [2]),
([4], [2]),
([4.5], [-1]),
([1, 2], [0, 1]),
([1, 2, 3], [0, 1, -1]),
([1, 2, 3, 4], [0, 1, -1, 2]),
([1, 2, 3, 4, 2], [0, 1, -1, 2, 1])])
def test_get_indexer_with_int_and_float(self, query, expected):
tuples = [(0, 1), (1, 2), (3, 4)]
index = IntervalIndex.from_tuples(tuples, closed='right')
result = index.get_indexer(query)
expected = np.array(expected, dtype='intp')
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('tuples, closed', [
([(0, 2), (1, 3), (3, 4)], 'neither'),
([(0, 5), (1, 4), (6, 7)], 'left'),
([(0, 1), (0, 1), (1, 2)], 'right'),
([(0, 1), (2, 3), (3, 4)], 'both')])
def test_get_indexer_errors(self, tuples, closed):
# IntervalIndex needs non-overlapping for uniqueness when querying
index = IntervalIndex.from_tuples(tuples, closed=closed)
msg = ('cannot handle overlapping indices; use '
'IntervalIndex.get_indexer_non_unique')
with pytest.raises(ValueError, match=msg):
index.get_indexer([0, 2])
@pytest.mark.parametrize('query, expected', [
([-0.5], ([-1], [0])),
([0], ([0], [])),
([0.5], ([0], [])),
([1], ([0, 1], [])),
([1.5], ([0, 1], [])),
([2], ([0, 1, 2], [])),
([2.5], ([1, 2], [])),
([3], ([2], [])),
([3.5], ([2], [])),
([4], ([-1], [0])),
([4.5], ([-1], [0])),
([1, 2], ([0, 1, 0, 1, 2], [])),
([1, 2, 3], ([0, 1, 0, 1, 2, 2], [])),
([1, 2, 3, 4], ([0, 1, 0, 1, 2, 2, -1], [3])),
([1, 2, 3, 4, 2], ([0, 1, 0, 1, 2, 2, -1, 0, 1, 2], [3]))])
def test_get_indexer_non_unique_with_int_and_float(self, query, expected):
tuples = [(0, 2.5), (1, 3), (2, 4)]
index = IntervalIndex.from_tuples(tuples, closed='left')
result_indexer, result_missing = index.get_indexer_non_unique(query)
expected_indexer = Int64Index(expected[0])
expected_missing = np.array(expected[1], dtype='intp')
tm.assert_index_equal(result_indexer, expected_indexer)
tm.assert_numpy_array_equal(result_missing, expected_missing)
# TODO we may also want to test get_indexer for the case when
    # the intervals are duplicated, decreasing, non-monotonic, etc.
def test_contains(self):
index = IntervalIndex.from_arrays([0, 1], [1, 2], closed='right')
# __contains__ requires perfect matches to intervals.
assert 0 not in index
assert 1 not in index
assert 2 not in index
assert Interval(0, 1, closed='right') in index
assert Interval(0, 2, closed='right') not in index
assert Interval(0, 0.5, closed='right') not in index
assert Interval(3, 5, closed='right') not in index
assert Interval(-1, 0, closed='left') not in index
assert Interval(0, 1, closed='left') not in index
assert Interval(0, 1, closed='both') not in index
def test_contains_method(self):
index = IntervalIndex.from_arrays([0, 1], [1, 2], closed='right')
assert not index.contains(0)
assert index.contains(0.1)
assert index.contains(0.5)
assert index.contains(1)
assert index.contains(Interval(0, 1, closed='right'))
assert not index.contains(Interval(0, 1, closed='left'))
assert not index.contains(Interval(0, 1, closed='both'))
assert not index.contains(Interval(0, 2, closed='right'))
assert not index.contains(Interval(0, 3, closed='right'))
assert not index.contains(Interval(1, 3, closed='right'))
assert not index.contains(20)
assert not index.contains(-20)
|
bsd-3-clause
|
thesuperzapper/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/io_test.py
|
137
|
5063
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
alphaBenj/zipline
|
zipline/examples/dual_ema_talib.py
|
1
|
3729
|
#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys Apple (AAPL) once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
from zipline.api import order, record, symbol
from zipline.finance import commission
# Import exponential moving average from talib wrapper
from talib import EMA
def initialize(context):
context.asset = symbol('AAPL')
# To keep track of whether we invested in the stock or not
context.invested = False
# Explicitly set the commission to the "old" value until we can
# rebuild example data.
# github.com/quantopian/zipline/blob/master/tests/resources/
# rebuild_example_data#L105
context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))
def handle_data(context, data):
trailing_window = data.history(context.asset, 'price', 40, '1d')
if trailing_window.isnull().values.any():
return
short_ema = EMA(trailing_window.values, timeperiod=20)
long_ema = EMA(trailing_window.values, timeperiod=40)
buy = False
sell = False
if (short_ema[-1] > long_ema[-1]) and not context.invested:
order(context.asset, 100)
context.invested = True
buy = True
elif (short_ema[-1] < long_ema[-1]) and context.invested:
order(context.asset, -100)
context.invested = False
sell = True
record(AAPL=data.current(context.asset, "price"),
short_ema=short_ema[-1],
long_ema=long_ema[-1],
buy=buy,
sell=sell)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if 'AAPL' in results and 'short_ema' in results and 'long_ema' in results:
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index,
results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
else:
msg = 'AAPL, short_ema and long_ema data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2014-01-01', tz='utc'),
'end': pd.Timestamp('2014-11-01', tz='utc'),
}
|
apache-2.0
|