repo_name | path | copies | size | content | license
---|---|---|---|---|---
FAB4D/humanitas | prediction/esn/meboot.py | 1 | 2655 | """
MEBOOT.PY - Python package for the meboot (Maximum Entropy Bootstrap) algorithm for Time Series
Author: Fabian Brix
Method by H.D. Vinod, Fordham University -
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
def sort(series):
ind_sorted = np.argsort(series)
s_sorted = series[ind_sorted]
return s_sorted, ind_sorted
def get_trm_mean(series, percent):
# FIXED
dev = np.abs(series[1:]-series[:-1])
n = len(dev)
k = n*(percent/100.0)/2.0
    k = int(round(k))  # slice indices must be integers
return np.mean(dev[k:n-k])
def get_intermed_pts(series, s_sorted, percent):
zt = (s_sorted[:-1]+s_sorted[1:])/2.0
m_trm = get_trm_mean(series, percent)
print m_trm
z0 = s_sorted[0]-m_trm
zT = s_sorted[-1]+m_trm
z = np.hstack((z0,zt,zT))
return z
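# A worked example of the construction above, using the toy series from main() below
# (percent=10, so no deviations are trimmed):
#   series = [4, 12, 36, 20, 8]          -> sorted: [4, 8, 12, 20, 36]
#   abs. successive deviations (unsorted): [8, 24, 16, 12], trimmed mean m_trm = 15
#   midpoints zt = [6, 10, 16, 28]; z0 = 4 - 15 = -11; zT = 36 + 15 = 51
#   z = [-11, 6, 10, 16, 28, 51]         -> intervals (-11,6), (6,10), (10,16), (16,28), (28,51)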
def get_intervals(z):
return np.vstack((z[:-1], z[1:])).T
def get_me_density(intervals):
return 1.0/(intervals[:,1]-intervals[:,0])
def get_cpf(me_density, intervals):
cpf = np.array([sum(me_density[:i+1]) for i in xrange(me_density.shape[0]-1)])
return cpf/np.max(cpf)
def get_quantiles(cpf, intervals, series):
quantiles = []
T = float(len(series))
t = np.arange(T+1)
Rt = np.vstack((t[:-1]/T,t[1:]/T)).T
print Rt
for d in xrange(series.shape[0]):
u = np.random.uniform(0,1)
for i in xrange(cpf.shape[0]):
cp = cpf[i]
if u <= cp:
cpm = cpf[i-1]
if i == 0:
cpm = 0
                m = (cp-cpm)/(intervals[i,1]-intervals[i,0])  # CDF slope over this interval
xp = (u - cpm)*1.0/m+intervals[i,0]
quantiles.append(xp)
break
return np.array(quantiles)
def meboot(series, replicates):
# ASC by default
print series
np.random.seed(0)
s_sorted, ind_sorted = sort(series)
z = get_intermed_pts(series, s_sorted, 10)
#print 'z ', z
intervals = get_intervals(z)
#print 'intervals ', intervals
me_density = get_me_density(intervals)
#print 'uni dens ', me_density
cpf = get_cpf(me_density, intervals)
#print 'cpf ', cpf
quantiles = get_quantiles(cpf, intervals, series)
#print 'quantiles ', quantiles
quantiles = np.sort(quantiles)
replicate = quantiles[ind_sorted]
print 'replicate ', replicate
    # TODO: Understand and add repeat mechanism
plt.plot(series, color='r')
plt.plot(replicate, color='b')
plt.ylim(0,30)
plt.show()
def main(args):
series = np.array([4,12,36,20,8])
meboot(series, 1)
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print 'hello'
    else:
        main(sys.argv[1:])
| bsd-3-clause |
milkpku/BetaElephant | policy_experiment/analysis.py | 1 | 1189 | import numpy as np
import matplotlib.pyplot as plt
def load_log_file(file_path):
fh = open(file_path)
s = fh.readlines()
    accuracy = np.zeros((len(s),))
    for i in range(len(s)):
        accuracy[i] = float(s[i][-5:-1])
    return accuracy
def smooth(array, window=250):
count = 0
    for i in range(window):  # include the i=0 slice so exactly `window` terms are averaged
count += array[i:i-window]
count /= window
return count
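# A vectorized alternative to smooth() (a sketch, not used above): the same kind of
# moving average can be computed with np.convolve, avoiding the Python loop.
#
#   >>> def smooth_conv(array, window=250):
#   ...     kernel = np.ones(window) / window
#   ...     return np.convolve(array, kernel, mode='valid')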
if __name__=='__main__':
watch_list = [
#'policy.orign',
#'policy.add-enemymove',
#'policy.add-enemyprot',
'policy.add-all',
'policy.fast-policy',
'policy.resNet.add-all',
'policy.pip.add-all',
'policy.fc.add-all'
]
plot_list = []
for folder in watch_list:
a = load_log_file(folder+'/log.txt')
a = a[a<1]
a = smooth(a)
f, = plt.plot(a)
plot_list.append(f)
plt.legend(plot_list, watch_list, loc=4)
plt.xlim(xmin=0, xmax=10000)
plt.xlabel('epoch')
plt.ylabel('accuracy')
# plt.title('Validation Accuracy for Different Feature')
plt.title('Validation Accuracy for Different Model')
plt.show()
| mit |
mwv/scikit-learn | sklearn/lda.py | 72 | 17751 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
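# A minimal usage sketch for _cov (hypothetical random data; the helper is
# module-private, so this is for illustration only):
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(20, 3)
#   >>> _cov(X, shrinkage=None).shape      # empirical covariance
#   (3, 3)
#   >>> _cov(X, shrinkage='auto').shape    # Ledoit-Wolf shrinkage
#   (3, 3)
#   >>> _cov(X, shrinkage=0.5).shape       # fixed shrinkage in [0, 1]
#   (3, 3)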
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
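    # A small numeric note on the steps above (illustration only): for a binary
    # problem with decision value d, the in-place sequence *=-1, exp, +=1,
    # reciprocal evaluates the logistic function 1 / (1 + exp(-d)), e.g.
    #
    #   >>> d = 0.0
    #   >>> 1.0 / (1.0 + np.exp(-d))
    #   0.5
    #
    # and the two columns returned are then [1 - p, p].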
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
| bsd-3-clause |
iwegner/MITK | Modules/Biophotonics/python/iMC/scripts/ipcai_to_theano/input_icai_data.py | 6 | 3612 | """
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
from __future__ import print_function
import os
import numpy
import pandas as pd
import numpy as np
import theano
import theano.tensor as T
from regression.preprocessing import preprocess
__docformat__ = 'restructedtext en'
def create_dataset(path_to_simulation_results):
df = pd.read_csv(path_to_simulation_results, header=[0, 1])
X, y = preprocess(df, snr=10.)
y = y.values
return X, y
def load_data(data_root):
''' Loads the dataset
    :type data_root: string
    :param data_root: path to the folder containing the IPCAI simulation result files
'''
TRAIN_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_train_all_spectrocam.txt")
TEST_IMAGES = os.path.join(data_root,
"ipcai_revision_colon_mean_scattering_test_all_spectrocam.txt")
train_set = create_dataset(TRAIN_IMAGES)
valid_set = create_dataset(TEST_IMAGES)
test_set = (np.load("sample_image.npy"), np.array([0]))
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
return shared_x, shared_y
test_set_x, test_set_y = shared_dataset(test_set, 0)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
| bsd-3-clause |
james-jz-zheng/jjzz | ml/stock_prediction.py | 1 | 7279 | import yahoo_finance as yhf
from sklearn import *
import os.path, os, sys
import pickle
import numpy as np
import datetime as dt
# import pandas as pd
def next_biz_day(d):
nd = d+dt.timedelta(days=1)
return nd if nd.weekday() in range(5) else next_biz_day(nd)
def prev_biz_day(d):
pd = d-dt.timedelta(days=1)
return pd if pd.weekday() in range(5) else prev_biz_day(pd)
def get_raw(s_name, start, end):
FILE_PATH, PATH_SEPERATOR = (os.environ.get('TEMP'), '\\') if sys.platform.startswith('win') else (r'/tmp', '/')
file_name = FILE_PATH + PATH_SEPERATOR + s_name + start + end + '.txt'
if os.path.isfile(file_name):
with open(file_name,'r') as f:
raw = pickle.load(f)
else:
raw = yhf.Share(s_name).get_historical(start,end)
with open(file_name,'w') as f:
pickle.dump(raw, f)
return raw
def get_s(s_name, start, end, field):
return [float(i[field]) for i in get_raw(s_name, start, end)][::-1]
def get_str(s_name, start, end, field):
return [str(i[field]) for i in get_raw(s_name, start, end)][::-1]
def get_diff(arr):
return [0] + [2.0*(arr[i+1] - arr[i])/(arr[i+1] + arr[i]) for i in range(len(arr) - 1)]
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-1.0 * z))
def nomalize(arr):
x = np.array(arr)
min, max = x[np.argmin(x)], x[np.argmax(x)]
return ((x - min) / (max - min))*2.0 -1
def average(arr, ndays):
a = [[arr[0]] * i + arr[:-i] if i>0 else arr for i in range(ndays)]
k = np.zeros_like(a[0])
for i in range(ndays):
k += np.array(a[i])
return np.array(k) / float(ndays)
def ave_n(n):
return lambda x:average(x, n)
def offset(arr, ndays):
a = [arr[0]] * ndays + arr[:-ndays]
return np.array(a)
def offset_n(n):
return lambda x:offset(x, n)
def merge_fs(fs):
return fs[0] if len(fs) == 1 else lambda *args: (merge_fs(fs[1:]))(fs[0](*args))
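# merge_fs chains the given functions left to right, e.g. (a small sketch):
#
#   >>> f = merge_fs([lambda x: x + 1, lambda x: x * 10])
#   >>> f(2)    # (2 + 1) * 10
#   30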
# --- Run parameters ---
x_names = 'MSFT|AAPL|GOOG|FB|INTC|AMZN|BIDU'.split('|')
y_name = 'BIDU'
percentage_for_training = 0.95
se_dates = [dt.datetime(*d) for d in [(2013,1,3), (2017,10,20)]]
print se_dates
input_start, input_end = [d.strftime('%Y-%m-%d') for d in se_dates]
se_dates = [next_biz_day(d) for d in se_dates]
print se_dates
predict_start, predict_end = [d.strftime('%Y-%m-%d') for d in se_dates]
# training dataset selection
lwfs = [
# label, weight, methods
('Close', 2.0, [get_s, nomalize, sigmoid]),
('Close', 5.0, [get_s, get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, get_diff, offset_n(1), nomalize, sigmoid]),
('Close', 1.0, [get_s, get_diff, offset_n(2), nomalize, sigmoid]),
('Close', 1.0, [get_s, get_diff, offset_n(3), nomalize, sigmoid]),
('Close', 1.0, [get_s, get_diff, offset_n(4), nomalize, sigmoid]),
('Open', 3.0, [get_s, get_diff, nomalize, sigmoid]),
('High', 2.0, [get_s, get_diff, nomalize, sigmoid]),
('Low', 2.0, [get_s, get_diff, nomalize, sigmoid]),
('Volume', 1.0, [get_s, nomalize, sigmoid]),
('Volume', 1.0, [get_s, ave_n(5), nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(2), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(2), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(3), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(3), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(5), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(5), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(10), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(10), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(20), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(20), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(30), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(30), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(50), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(50), get_diff, nomalize, sigmoid]),
('Close', 1.0, [get_s, ave_n(80), get_diff, nomalize, sigmoid]),
('Open', 1.0, [get_s, ave_n(80), get_diff, nomalize, sigmoid]),
]
train_X_all = zip(*[w*(merge_fs(fs)(i, input_start, input_end, l)) for i in x_names for l,w,fs in lwfs])
train_Y_all = get_diff(get_s(y_name, predict_start, predict_end, 'Close'))
# train_Y_all_10 = [1 if i>0 else -1 for i in get_diff(get_s(y_name, predict_start, predict_end, 'Close'))]
xx1 = get_str(y_name, predict_start, predict_end, 'Date')
xx2 = get_s(y_name, predict_start, predict_end, 'Close')
print zip(xx1,xx2)[-10:]
print "Running for input X({0}) and Y({1})...".format(len(train_X_all), len(train_Y_all))
if len(train_X_all) != len(train_Y_all):
raise Exception("### Uneven input X({0}) and Y({1}), please Check!!!".format(len(train_X_all), len(train_Y_all)))
n_train_data = int(len(train_X_all)*percentage_for_training)
train_X, train_Y = train_X_all[30:n_train_data], train_Y_all[30:n_train_data]
test_X, test_Y = train_X_all[n_train_data:], train_Y_all[n_train_data:]
# fit and predict
def fit_and_predict(sklnr, train_X, train_Y, test_X):
sklnr.fit(train_X ,train_Y)
out_Y = sklnr.predict(test_X)
actual_vs_predict = zip(*[test_Y, out_Y])
matched_count = [1 if i[0]*i[1]>0 else 0 for i in actual_vs_predict]
accuracy = 1.0* sum(matched_count)/len(matched_count)
print 'Accuracy: {0}% Train({1}):Test({2}) - Model: {3}'.format(
int(accuracy*1000)/10.0,
len(train_Y),
len(test_Y),
str(sklnr).replace('\n','')[:140])
print 'output: {}'.format(actual_vs_predict[-10:])
# choose different learners
learner = [
# naive_bayes.GaussianNB(),
# linear_model.SGDClassifier(),
# svm.SVC(),
# tree.DecisionTreeClassifier(),
# ensemble.RandomForestClassifier(),
ensemble.AdaBoostRegressor(),
ensemble.BaggingRegressor(),
ensemble.ExtraTreesRegressor(),
ensemble.GradientBoostingRegressor(),
ensemble.RandomForestRegressor(),
gaussian_process.GaussianProcessRegressor(),
linear_model.HuberRegressor(),
linear_model.PassiveAggressiveRegressor(),
linear_model.RANSACRegressor(),
linear_model.SGDRegressor(),
linear_model.TheilSenRegressor(),
# multioutput.MultiOutputRegressor(),
neighbors.KNeighborsRegressor(),
neighbors.RadiusNeighborsRegressor(),
neural_network.MLPRegressor(),
tree.DecisionTreeRegressor(),
tree.ExtraTreeRegressor(),
### linear_model.SGDRegressor(),
### tree.DecisionTreeRegressor(),
### ensemble.RandomForestRegressor(),
### neural_network.MLPRegressor(activation='tanh', solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(15, 2), random_state=1)
# neural_network.MLPClassifier(activation='tanh', solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(15, 2), random_state=1)
]
# run
for l in learner:
try:
fit_and_predict(l, train_X, train_Y, test_X)
except:
pass
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/event_handling/pick_event_demo.py | 4 | 6436 | #!/usr/bin/env python
"""
You can enable picking by setting the "picker" property of an artist
(for example, a matplotlib Line2D, Text, Patch, Polygon, AxesImage,
etc...)
There are a variety of meanings of the picker property
None - picking is disabled for this artist (default)
boolean - if True then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
float - if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
        off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, for example, the indices of the data within
epsilon of the pick event
function - if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event.
hit, props = picker(artist, mouseevent)
to determine the hit test. If the mouse event is over the
artist, return hit=True and props is a dictionary of properties
you want added to the PickEvent attributes
After you have enabled an artist for picking by setting the "picker"
property, you need to connect to the figure canvas pick_event to get
pick callbacks on mouse press events. For example,
def pick_handler(event):
mouseevent = event.mouseevent
artist = event.artist
# now do something with this...
The pick event (matplotlib.backend_bases.PickEvent) which is passed to
your callback is always fired with two attributes:
mouseevent - the mouse event that generate the pick event. The
mouse event in turn has attributes like x and y (the coordinates in
display space, such as pixels from left, bottom) and xdata, ydata (the
coords in data space). Additionally, you can get information about
which buttons were pressed, which keys were pressed, which Axes
the mouse is over, etc. See matplotlib.backend_bases.MouseEvent
for details.
artist - the matplotlib.artist that generated the pick event.
Additionally, certain artists like Line2D and PatchCollection may
attach additional meta data like the indices into the data that meet
the picker criteria (for example, all the points in the line that are within
the specified epsilon tolerance)
The examples below illustrate each of these methods.
"""
from __future__ import print_function
from matplotlib.pyplot import figure, show
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from matplotlib.image import AxesImage
import numpy as np
from numpy.random import rand
if 1: # simple picking, lines, rectangles and text
fig = figure()
ax1 = fig.add_subplot(211)
ax1.set_title('click on points, rectangles or text', picker=True)
ax1.set_ylabel('ylabel', picker=True, bbox=dict(facecolor='red'))
line, = ax1.plot(rand(100), 'o', picker=5) # 5 points tolerance
# pick the rectangle
ax2 = fig.add_subplot(212)
bars = ax2.bar(range(10), rand(10), picker=True)
for label in ax2.get_xticklabels(): # make the xtick labels pickable
label.set_picker(True)
def onpick1(event):
if isinstance(event.artist, Line2D):
thisline = event.artist
xdata = thisline.get_xdata()
ydata = thisline.get_ydata()
ind = event.ind
print('onpick1 line:', zip(np.take(xdata, ind), np.take(ydata, ind)))
elif isinstance(event.artist, Rectangle):
patch = event.artist
print('onpick1 patch:', patch.get_path())
elif isinstance(event.artist, Text):
text = event.artist
print('onpick1 text:', text.get_text())
fig.canvas.mpl_connect('pick_event', onpick1)
if 1: # picking with a custom hit test function
# you can define custom pickers by setting picker to a callable
# function. The function has the signature
#
# hit, props = func(artist, mouseevent)
#
# to determine the hit test. if the mouse event is over the artist,
# return hit=True and props is a dictionary of
# properties you want added to the PickEvent attributes
def line_picker(line, mouseevent):
"""
find the points within a certain distance from the mouseclick in
data coords and attach some extra attributes, pickx and picky
which are the data points that were picked
"""
if mouseevent.xdata is None: return False, dict()
xdata = line.get_xdata()
ydata = line.get_ydata()
maxd = 0.05
d = np.sqrt((xdata-mouseevent.xdata)**2. + (ydata-mouseevent.ydata)**2.)
ind = np.nonzero(np.less_equal(d, maxd))
if len(ind):
pickx = np.take(xdata, ind)
picky = np.take(ydata, ind)
props = dict(ind=ind, pickx=pickx, picky=picky)
return True, props
else:
return False, dict()
def onpick2(event):
print('onpick2 line:', event.pickx, event.picky)
fig = figure()
ax1 = fig.add_subplot(111)
ax1.set_title('custom picker for line data')
line, = ax1.plot(rand(100), rand(100), 'o', picker=line_picker)
fig.canvas.mpl_connect('pick_event', onpick2)
if 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection)
x, y, c, s = rand(4, 100)
def onpick3(event):
ind = event.ind
print('onpick3 scatter:', ind, np.take(x, ind), np.take(y, ind))
fig = figure()
ax1 = fig.add_subplot(111)
col = ax1.scatter(x, y, 100*s, c, picker=True)
#fig.savefig('pscoll.eps')
fig.canvas.mpl_connect('pick_event', onpick3)
if 1: # picking images (matplotlib.image.AxesImage)
fig = figure()
ax1 = fig.add_subplot(111)
im1 = ax1.imshow(rand(10,5), extent=(1,2,1,2), picker=True)
im2 = ax1.imshow(rand(5,10), extent=(3,4,1,2), picker=True)
im3 = ax1.imshow(rand(20,25), extent=(1,2,3,4), picker=True)
im4 = ax1.imshow(rand(30,12), extent=(3,4,3,4), picker=True)
ax1.axis([0,5,0,5])
def onpick4(event):
artist = event.artist
if isinstance(artist, AxesImage):
im = artist
A = im.get_array()
print('onpick4 image', A.shape)
fig.canvas.mpl_connect('pick_event', onpick4)
show()
| mit |
DarthThanatos/citySimNG | citySimNGView/extra/scrollable_wx_matplotlib.py | 1 | 2647 | from numpy import arange, sin, pi
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
import wx
class MyFrame(wx.Frame):
def __init__(self, parent, id):
wx.Frame.__init__(self, parent, id, 'scrollable plot',
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER,
size=(800, 400))
self.panel = wx.Panel(self, -1)
self.fig = Figure((5, 4), 75)
self.canvas = FigureCanvasWxAgg(self.panel, -1, self.fig)
self.scroll_range = 400
self.canvas.SetScrollbar(wx.HORIZONTAL, 0, 5,
self.scroll_range)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas, -1, wx.EXPAND)
self.panel.SetSizer(sizer)
self.panel.Fit()
self.init_data()
self.init_plot()
self.canvas.Bind(wx.EVT_SCROLLWIN, self.OnScrollEvt)
def init_data(self):
# Generate some data to plot:
self.dt = 0.01
self.t = arange(0, 5, self.dt)
self.x = sin(2 * pi * self.t)
# Extents of data sequence:
self.i_min = 0
self.i_max = len(self.t)
# Size of plot window:
self.i_window = 100
# Indices of data interval to be plotted:
self.i_start = 0
self.i_end = self.i_start + self.i_window
def init_plot(self):
self.axes = self.fig.add_subplot(111)
self.plot_data = \
self.axes.plot(self.t[self.i_start:self.i_end],
self.x[self.i_start:self.i_end])[0]
def draw_plot(self):
# Update data in plot:
self.plot_data.set_xdata(self.t[self.i_start:self.i_end])
self.plot_data.set_ydata(self.x[self.i_start:self.i_end])
# Adjust plot limits:
self.axes.set_xlim((min(self.t[self.i_start:self.i_end]),
max(self.t[self.i_start:self.i_end])))
self.axes.set_ylim((min(self.x[self.i_start:self.i_end]),
max(self.x[self.i_start:self.i_end])))
# Redraw:
self.canvas.draw()
def OnScrollEvt(self, event):
# Update the indices of the plot:
self.i_start = self.i_min + event.GetPosition()
self.i_end = self.i_min + self.i_window + event.GetPosition()
self.draw_plot()
class MyApp(wx.App):
def OnInit(self):
self.frame = MyFrame(parent=None, id=-1)
self.frame.Show()
self.SetTopWindow(self.frame)
return True
if __name__ == '__main__':
app = MyApp()
    app.MainLoop()
| apache-2.0 |
elijah513/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/plot_from_pp_interp_p_levs_temp_geop_sp_hum.py | 1 | 13209 | """
Load pp, plot and save
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
#pp_file = ''
plot_diags=['temp', 'sp_hum']
#plot_diags=['sp_hum']
plot_levels = [925, 850, 700, 500]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['dkbhu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
experiment_ids = ['dkbhu']
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
def add_hour_of_day(cube, coord, name='hour'):
add_categorised_coord(cube, name, coord,
lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = 660.
clev_max = 810.
elif p_level == 850:
clev_min = 1435.
clev_max = 1530.
elif p_level == 700:
clev_min = 3090.
clev_max = 3155.
elif p_level == 500:
clev_min = 5800.
clev_max = 5890.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = 300.
clevpt_max = 312.
elif p_level == 850:
clevpt_min = 302.
clevpt_max = 310.
elif p_level == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p_level == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p_level == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p_level == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p_level == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print 'Specific humidity min/max not set for this pressure level'
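        # A more compact alternative to the if/elif ladders above (a sketch only, not
        # used by this script): keep the per-level contour limits in a dict keyed by
        # pressure level, e.g.
        #
        #   contour_limits = {925: (660., 810.), 850: (1435., 1530.),
        #                     700: (3090., 3155.), 500: (5800., 5890.)}
        #   clev_min, clev_max = contour_limits[p_level]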
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.arange(clev_min, clev_max, 5)
p_level_constraint = iris.Constraint(pressure=p_level)
for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pp_file = '%s_%s_on_p_levs_mean_by_hour.pp' % (experiment_id, plot_diag)
pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, pp_file)
pcube = iris.load_cube(pfile, p_level_constraint)
# For each hour in cube
height_pp_file = '%s_408_on_p_levs_mean_by_hour.pp' % (experiment_id)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
height_cube = iris.load_cube(height_pfile, p_level_constraint)
print pcube
print height_cube
#time_coords = cube_f.coord('time')
add_hour_of_day(pcube, pcube.coord('time'))
add_hour_of_day(height_cube, height_cube.coord('time'))
#pcube.remove_coord('time')
#cube_diff.remove_coord('time')
#height_cube.remove_coord('time')
#height_cube_diff.remove_coord('time')
#p_cube_difference = iris.analysis.maths.subtract(pcube, cube_diff, dim='hour')
#height_cube_difference = iris.analysis.maths.subtract(height_cube, height_cube_diff, dim='hour')
#pdb.set_trace()
#del height_cube, pcube, height_cube_diff, cube_diff
for t, time_cube in enumerate(pcube.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
print time_cube
height_cube_slice = height_cube.extract(iris.Constraint(hour=time_cube.coord('hour').points))
                    # Get time of averages for plot title
h = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).strftime('%H%M')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
lat = time_cube.coord('grid_latitude').points
lon = time_cube.coord('grid_longitude').points
cs = time_cube.coord_system('CoordSystem')
lons, lats = np.meshgrid(lon, lat)
lons, lats = iris.analysis.cartography.unrotate_pole\
(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
x,y = m(lons,lats)
if plot_diag=='temp':
min_contour = clevpt_min
max_contour = clevpt_max
cb_label='K'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors) %s UTC %s IST' % (h, h_local)
tick_interval=2
clev_number=max_contour-min_contour+1
elif plot_diag=='sp_hum':
min_contour = clevsh_min
max_contour = clevsh_max
cb_label='kg/kg'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
and wind (vectors) %s UTC %s IST' % (h, h_local)
tick_interval=0.002
clev_number=(max_contour-min_contour+0.001)*(10**3)
clevs = np.linspace(min_contour, max_contour, clev_number)
#clevs = np.linspace(-3, 3, 32)
cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
#cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
cs_lin = iplt.contour(height_cube_slice, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
#cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_%s_%s_hPa_and_geop_height_%s' % (experiment_id, plot_diag, p_level, h)
save_dir = '%s%s/%s' % (save_path, experiment_id, plot_diag)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
plt.title('%s UTC %s IST' % (h, h_local))
fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
| mit |
deepinsight/Deformable-ConvNets | lib/dataset/pycocotools/coco.py | 5 | 18005 | __author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
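# A minimal usage sketch (hypothetical annotation path and category name; only the
# API calls listed above are used):
#
#   >>> coco = COCO('annotations/instances_val2014.json')
#   >>> catIds = coco.getCatIds(catNms=['person'])
#   >>> imgIds = coco.getImgIds(catIds=catIds)
#   >>> annIds = coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None)
#   >>> anns = coco.loadAnns(annIds)
#   >>> coco.showAnns(anns)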
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
# this can be changed by defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
print 'downloaded %d/%d images (t=%.1fs)'%(i, N, time.time()- tic)
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]).clip(max=h-1), \
np.array(s[0:N:2]).clip(max=w-1)) # (y, x)
M[rr, cc] = 1
return M
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = mask.frPyObjects(segm, h, w)
rle = mask.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = mask.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = mask.decode(rle)
return m
| apache-2.0 |
Sapphirine/Stock-price-Movement-Prediction | pydoop/predict_new.py | 1 | 4729 | __author__ = 'arkilic'
import csv
import numpy as np
from sklearn.linear_model import SGDRegressor
import random
import pprint
raise NotImplementedError('This is the non-pydoop version, please see predict_new_map_red.py for the application')
print "Prediction on IBM 30 year data:"
f = open('HD-1984-2014-d.csv')
tmp_data = csv.reader(f)
my_data = list()
for item in tmp_data:
tmp_item = list()
for i in item:
tmp_item.append(i)
my_data.append(tmp_item)
data = my_data[1:]
X = list()
training_indices = list()
for i in xrange(int(len(data)*0.8)):
training_indices.append(i)
test_indices = list()
for i in xrange(int(len(data))):
if i in training_indices:
pass
else:
if i == 0:
pass
else:
test_indices.append(i)
for s_data in data:
X.append(map(float, s_data[1:5]))
y = list()
y2 = list()
for s_data in data:
y.append(float(s_data[4]))
y2.append(float(s_data[1]))
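# Assuming the usual Date,Open,High,Low,Close CSV layout, y holds the Close column (index 4)
# and y2 the Open column (index 1), so one regressor is fit per target below.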
pprint.pprint('Training the supervised learning model... Fit on training data')
print('=========================================')
try:
clf = SGDRegressor(loss="huber")
pprint.pprint(clf.fit(X, y))
except:
raise
try:
clf2 = SGDRegressor(loss="huber")
pprint.pprint(clf2.fit(X, y2))
except:
raise
print('=========================================')
print 'Model testing itself! Confidence score on the training data used to construct:', clf.score(X, y)
pprint.pprint('Ready to predict')
print('=========================================')
pprint.pprint('Testing with test data...')
test_data = list()
test_diff = list()
predict_diff = list()
for index in test_indices:
tmp = data[index][1:5]
my_tmp = list()
for item in tmp:
my_tmp.append(float(item))
test_data.append(my_tmp)
test_diff.append(float(data[index][4]) - float(data[index][1]))
# #
prediction_results_close = clf.predict(test_data)
prediction_results_open = clf2.predict(test_data)
for i in xrange(len(prediction_results_close)):
p_diff = prediction_results_close[i] - prediction_results_open[i]
predict_diff.append(p_diff)
print test_diff
print predict_diff
test_inc =0
for diff in test_diff:
if diff > 0:
test_inc += 1
p_inc =0
total_diff = 0
s = 0
for diff in predict_diff:
total_diff += diff
s += 1
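    # Count predicted up-moves; the -0.22 offset appears to act as an empirical bias correction.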
if diff > -0.22:
p_inc += 1
pprint.pprint(total_diff/float(s))
print "The accuracy of the stock price prediction with 30 years of data ..: ", (p_inc/float(test_inc))*100
print "=========================================================================================\n"
print "Prediction on IBM 10 year data:"
f = open('HD-2004-2014-d.csv')
tmp_data = csv.reader(f)
my_data = list()
for item in tmp_data:
tmp_item = list()
for i in item:
tmp_item.append(i)
my_data.append(tmp_item)
data = my_data[1:]
X = list()
training_indices = list()
for i in xrange(int(len(data)*0.8)):
training_indices.append(i)
test_indices = list()
for i in xrange(int(len(data))):
if i in training_indices:
pass
else:
if i == 0:
pass
else:
test_indices.append(i)
for s_data in data:
X.append(map(float, s_data[1:5]))
y = list()
y2 = list()
for s_data in data:
y.append(float(s_data[4]))
y2.append(float(s_data[1]))
pprint.pprint('Training the supervised learning model... Fit on training data')
print('=========================================')
try:
clf = SGDRegressor(loss="huber")
pprint.pprint(clf.fit(X, y))
except:
raise
try:
clf2 = SGDRegressor(loss="huber")
pprint.pprint(clf2.fit(X, y2))
except:
raise
print('=========================================')
print 'Model testing itself! Confidence score on the training data used to construct:', clf.score(X, y)
pprint.pprint('Ready to predict')
print('=========================================')
pprint.pprint('Testing with test data...')
test_data = list()
test_diff = list()
predict_diff = list()
for index in test_indices:
tmp = data[index][1:5]
my_tmp = list()
for item in tmp:
my_tmp.append(float(item))
test_data.append(my_tmp)
test_diff.append(float(data[index][4]) - float(data[index][1]))
# #
prediction_results_close = clf.predict(test_data)
prediction_results_open = clf2.predict(test_data)
for i in xrange(len(prediction_results_close)):
p_diff = prediction_results_close[i] - prediction_results_open[i]
predict_diff.append(p_diff)
print test_diff
print predict_diff
p = 0
for entry in test_diff:
if entry > 0:
p += 1
k=0
for entry in predict_diff:
if entry>-0.77:
k += 1
print "The accuracy of the stock price prediction with 10 years of data ..: ", (p/float(k))*100
| apache-2.0 |
sanketloke/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28856 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
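    # each cv entry is a (train, test) pair of boolean masks; the two folds simply swap roles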
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/en/ensemble/plot_forest_importances.py | 168 | 1793 | """
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
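# The spread of importances across the individual trees is used below as the error bars (yerr).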
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
| gpl-3.0 |
mraspaud/dask | dask/dataframe/tests/test_optimize_dataframe.py | 3 | 1663 | import pytest
from operator import getitem
from toolz import merge
import dask
from dask.dataframe.optimize import dataframe_from_ctable
import dask.dataframe as dd
import pandas as pd
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
dfs = list(dsk.values())
def test_column_optimizations_with_bcolz_and_rewrite():
bcolz = pytest.importorskip('bcolz')
bc = bcolz.ctable([[1, 2, 3], [10, 20, 30]], names=['a', 'b'])
for cols in [None, 'abc', ['abc']]:
dsk2 = merge(dict((('x', i),
(dataframe_from_ctable, bc, slice(0, 2), cols, {}))
for i in [1, 2, 3]),
dict((('y', i),
(getitem, ('x', i), ['a', 'b']))
for i in [1, 2, 3]))
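        # After optimization the column selection should be pushed down into
        # dataframe_from_ctable itself, as 'expected' encodes.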
expected = dict((('y', i), (dataframe_from_ctable,
bc, slice(0, 2), ['a', 'b'], {}))
for i in [1, 2, 3])
result = dd.optimize(dsk2, [('y', i) for i in [1, 2, 3]])
assert result == expected
def test_fuse_ave_width():
df = pd.DataFrame({'x': range(10)})
df = dd.from_pandas(df, npartitions=5)
s = ((df.x + 1) + (df.x + 2))
with dask.set_options(fuse_ave_width=4):
a = s._optimize(s.dask, s._keys())
b = s._optimize(s.dask, s._keys())
assert len(a) < len(b)
assert len(a) <= 15
| bsd-3-clause |
DataViva/dataviva-scripts | scripts/crosswalk/format_raw_data.py | 1 | 1434 | # -*- coding: utf-8 -*-
import os, sys, time, bz2, click
import pandas as pd
import pandas.io.sql as sql
import numpy as np
import itertools
@click.command()
@click.argument('file_path', type=click.Path(exists=True))
@click.option('output_path', '--output', '-o', help='Path to save files to.', type=click.Path(), required=True, prompt="Output path")
def main(file_path, output_path):
nestings = []
fieldA = "hs"
fieldB = "cnae"
df = pd.read_csv(file_path, converters={fieldA: str, fieldB: str})
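    # Read the codes as strings so that HS/CNAE identifiers keep any leading zeros.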
df = df[ (df[fieldA].str.len() > 0) & (df[fieldB].str.len() >0)]
df = df[[fieldA, fieldB]]
if fieldA == "hs":
df.hs = df.hs.str.slice(2, 6)
df = df.drop_duplicates()
print df
print
print
# depths = {"hs" : [2, 6], "cnae": [1, 5]}
# for depthcol, lengths in depths.items():
# my_nesting.append(lengths)
# my_nesting_cols.append(depthcol)
# print my_nesting, my_nesting_cols
# for depths in itertools.product(*my_nesting):
# series = {}
# print depths
# for col_name, l in zip(my_nesting_cols, depths):
# series[col_name] = df[col_name].str.slice(0, l)
# addtl_rows = pd.DataFrame(series)
# full_table = pd.concat([addtl_rows, full_table])
# # print pk
# print full_table
df.to_csv("pi_crosswalk.csv", index=False)
if __name__ == "__main__":
main()
| mit |
13anjou/Research-Internship | Analysis/Dictionnary.py | 1 | 23710 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import csv
from pylab import*
import numpy
import math
import matplotlib.pyplot as plt
import numbers
from allTest import k, s
def main() :
dicoEchant = corres()
dicoTotal = dict()
    #We are going to import all the data:
#0 echantillon
#1 u
#2 v
#3 Okubo Weiss
#4 Lyapunov exp
#5 SST_adv
#6 SST_AMSRE
#7 grad_SST_adv
#8 age_from_bathy
#9 lon_from_bathy
#10 Shannon_Darwin_mean_all
#11 Shannon_Darwin_month_all
#12 Shannon_Darwin_mean_grp
#13 Shannon_Darwin_month_grp
#14 Shannon_Darwin_physat_month
#15 Shannon_Darwin_physat_mean
#16 Shannon_Darwin_retention
#17 lon_origin_45d
#18 lat_origin_45d
#19 physat_type
#20 abundance
#21 richness
#22 Shannon
#23 Simpson
#24 log(alpha)
#25 Jevenness
    #26 S.obs observed species
    #27 S.chao1 observed Chao1 index
    #28 se.chao1 estimated standard error on the Chao1 index
#29 S.ACE abundance based coverage estimation
#30 se.ACE standard error on the abundance based coverage estimation
    #31 distance to the power law
    #32 number of slopes
    #33 slope 1 (last slope, the tail)
    #34 slope 2 (second-to-last slope)
    #35 length of slope 1 (last slope, the tail)
    #36 length of slope 2 (second-to-last slope)
    #37 weight of slope 1
    #38 weight of slope 2
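    #Indices 0-30 are filled from the satellite, ribotype and richness files below;
    #31-38 are appended afterwards from the power-law fit results.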
    #Start by importing Tara_stations_multisat_extract.csv
premier = True
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\Tara_stations_multisat_extract.csv', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader :
if premier :
premier = False
l=len(row)
liste_param = ['echantillon']+row[1:l]
elif row ==[] :
pass
else :
dicoTotal[int(float(row[0])),5] = ['nan']+row[1:l]
dicoTotal[int(float(row[0])),20] = ['nan']+row[1:l]
dicoTotal[int(float(row[0])),180] = ['nan']+row[1:l]
dicoTotal[int(float(row[0])),2000] = ['nan']+row[1:l]
dicoTotal[11,5] = ['nan']*l
dicoTotal[11,20] = ['nan']*l
dicoTotal[11,180] = ['nan']*l
dicoTotal[11,2000] = ['nan']*l
    #Then import the samples, inserting the sample id at the front of each entry
    #Beware: there are both SUR and DCM samples!
for clef in dicoEchant :
if dicoEchant[clef][1] == 'SUR' :
try :
dicoTotal[dicoEchant[clef][0],dicoEchant[clef][2]][0]=clef
except :
pass
    #Then import the ribotypes
premier=True
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\diversityProtistRibotypesORGNL', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader :
l= len(row)
if premier :
premier =False
else :
try :
a=dicoEchant[row[0]]
if a[1] == 'SUR' :
dicoTotal[a[0],a[2]] = dicoTotal[a[0],a[2]] + row[1:l]
except :
pass
    #At this point, each entry of the dictionary should contain 26 elements
    #We make sure of it to avoid any misalignment.
l1 = 26
for clef in dicoTotal :
l2 = len(dicoTotal[clef])
if l2 < 26 :
dicoTotal[clef] = dicoTotal[clef] + ['nan']*(26-l2)
elif l2>26 :
print("entree {} trop longue !".format(clef[0]))
pass
else :
pass
    #Then import the richness data
premier = True
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\Richness_NonParametrics', 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader :
if premier :
premier = False
else :
try :
                    row2 = decoupage(row) #Split the row elements at the spaces
l=len(row2)
if dicoEchant[row2[0]][1] == 'SUR' :
a=dicoEchant[row2[0]]
dicoTotal[a[0],a[2]] = dicoTotal[a[0],a[2]] + row2[1:l]
except :
pass
    #At this point, each entry of the dictionary should contain 31 elements
    #We make sure of it to avoid any misalignment.
l1 = 31
for clef in dicoTotal :
l2 = len(dicoTotal[clef])
if l2 < 31 :
#print("perte de donnees sur l'echantillon' {}".format(clef[0]))
dicoTotal[clef] = dicoTotal[clef] + ['nan']*(31-l2)
elif l2>31 :
print("entree {} trop longue !".format(clef[0]))
pass
    #Import the distances to the power law
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Resultats\\log_log\\10_{}\\toutes\\ecartPowerLaw_{}.csv'.format(k,k), 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';',quotechar=',', quoting=csv.QUOTE_MINIMAL)
for row in reader :
for clef in dicoTotal :
if float(enlever(row[0])) == clef[0] :
try :
dicoTotal[clef][31]=float(enlever(row[1]))
except :
dicoTotal[clef]=dicoTotal[clef]+[float(enlever(row[1]))]
elif len(dicoTotal[clef])==31 :
dicoTotal[clef] = dicoTotal[clef] + [-100]
    #All that remains is to import the slopes
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Resultats\\log_log\\10_{}\\toutes\\pentes_{}.csv'.format(k,k), 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=';',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader :
pStations = float(remove(row[0],','))
pPentes1 = float(remove(row[2],','))
pNb = int(float(remove(row[3],',')))
diam = int(float(remove(row[4],',')))
if int(float(remove(row[3],','))) == 1 :
pPentes2 = 'Null'
pLong2 = 'Null'
pLong1 = float(remove(row[5],','))
poids1 = float(remove(row[6],','))
poids2 = 'Null'
else :
pLong2 = float(remove(row[5],','))
pLong1 = float(remove(row[6],','))
pPentes2 = float(remove(row[1],','))
poids2 = float(remove(row[7],','))
poids1 = float(remove(row[8],','))
try :
dicoTotal[pStations,diam] = dicoTotal[pStations,diam] + [pNb,pPentes1,pPentes2,pLong1,pLong2,poids1,poids2]
except :
pass
    #Finally, convert every element to a number when possible
indice = 0
for clef in dicoTotal :
indice = 0
for i in dicoTotal[clef] :
try :
dicoTotal[clef][indice] = float(dicoTotal[clef][indice])
except :
(a,b) = tentative(dicoTotal[clef][indice])
if a :
dicoTotal[clef][indice] = b
pass
indice = indice + 1
return(dicoTotal)
def corres() :
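    #Build a mapping sample_id -> (station number, 'SUR'/'DCM' layer, upper bound of the size fraction)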
with open('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\photic_samples.csv', 'r') as csvfile:
echantillons = dict()
        reader = csv.reader(csvfile, delimiter=';', quotechar='|') #This CSV uses ';' as the delimiter
for row in reader:
a= row[0]
b=row[2] #Station
            c= row[1] #SUR or DCM
if row[3]=='180-2000' :
d=2000
if row[3]=='0.8-5' :
d=5
if row[3]=='20-180' :
d=180
if row[3]=='5a20' :
d=20
echantillons[a] = (int(float(b)),c,d)
return(echantillons)
def decoupage(row) :
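    #Split the single space-separated string row[0] into tokens; 'a' starts out as a list, so the
    #first token is prefixed with '[]', which the '[]T' fixup below strips (sample ids presumably start with 'T')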
res=list()
a = list()
for lettre in row[0] :
if lettre == ' ' :
res = res + [a]
a=''
else :
if str(a)=='[]T' :
a = 'T'
a = str(a) + str(lettre)
res = res + [a]
return(res)
def tracer(a, b) : #To simply plot, for each station, one parameter against another.
    #Does not work with the slopes!
liste = selection(a,b)
abcisse = list()
noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
noma = noms[a]
nomb = noms[b]
bool5 = True
bool20 = True
bool180 = True
bool2000 = True
for i in liste :
ajout = True
for dot in abcisse :
if dot == i[2] :
ajout = False
if str(i[2]) == 'nan' :
ajout = False
if ajout :
abcisse = abcisse + [i[2]]
prec = i[2]
abcisse.sort()
l = len(abcisse)
plt.figure(figsize=(30,20))
xticks(arange(l),abcisse,rotation=90,fontsize=40)
indice = 0
plt.figure(figsize=(30,20))
indice = 0
for dot in abcisse :
for i in liste :
if i[2]==dot :
if i[1] == 5 :
c = 'red'
if bool5 :
bool5 = False
scatter(i[2],i[3],color=c,s=120,label='taille 0.8 a 5, queue')
annotate(i[0],(i[2],i[3]))
else :
scatter(i[2],i[3],color=c,s=120)
annotate(i[0],(i[2],i[3]))
if i[1] == 20 :
c = 'magenta'
if bool20 :
bool20 = False
scatter(i[2],i[3],color=c,s=120,label='taille 5 a 20, queue')
annotate(i[0],(i[2],i[3]))
else :
scatter(i[2],i[3],color=c,s=120)
annotate(i[0],(i[2],i[3]))
if i[1] == 180 :
c = 'green'
if bool180 :
bool180 = False
scatter(i[2],i[3],color=c,s=120,label='taille 20 a 180, queue')
annotate(i[0],(i[2],i[3]))
else :
scatter(i[2],i[3],color=c,s=120)
annotate(i[0],(i[2],i[3]))
if i[1] == 2000 :
c = 'blue'
                    if bool2000 :
                        bool2000 = False
scatter(i[2],i[3],color=c,s=120,label='taille 180 a 2000, queue')
annotate(i[0],(i[2],i[3]))
else :
scatter(i[2],i[3],color=c,s=120)
annotate(i[0],(i[2],i[3]))
indice = indice + 1
plt.title("trace de {} en fonction de {}".format(nomb, noma),fontsize=40)
plt.legend()
yticks(fontsize=40)
xticks(fontsize=40)
plt.xlabel("{}".format(noma), fontsize=40)
plt.ylabel("{}".format(nomb), fontsize=40)
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\{}F{}.png'.format(nomb,noma))
def selection(a, b) :
dicoTotal = main()
res = list()
for clef in dicoTotal :
if dicoTotal[clef][a] != -1 and dicoTotal[clef][b] != -1 and str(dicoTotal[clef][a]) != 'nan' and str(dicoTotal[clef][a]) != 'nan' and dicoTotal[clef][a] != -100 and dicoTotal[clef][b] != -100:
res = res + [(clef[0], clef[1], dicoTotal[clef][a], dicoTotal[clef][b])]
return(res)
def remove(l,item) :
res = str()
for data in l :
if item == data :
pass
else :
res = res + data
return(res)
def tentative(s) :
ok = False
res = str()
expo = str()
for lettre in s :
if lettre == 'e' :
ok = True
elif ok :
expo = expo + str(lettre)
else :
res = res + str(lettre)
if ok :
res = float(res)
expo = float(expo)
        res = res*10**expo
print('tentative')
return(ok,res)
def serie(k) :
for i in [3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18] :
tracer(i,k)
def pente(a) :
noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
nom = noms[a]
dicoTotal = main()
liste1 = list()
liste2 = list()
for clef in dicoTotal :
try :
if dicoTotal[clef][32] == 1 :
liste1 = liste1 + [clef]
elif dicoTotal[clef][32] ==2 :
liste2 = liste2 + [clef]
except :
pass
plt.figure(figsize=(30,20))
bool1 = True
bool2 = True
bool3 = True
bool4 = True
for clef in liste1 :
if clef[1] == 5 :
c='red'
if bool1 :
bool1 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 0.8 a 5, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33],))
if clef[1] == 20 :
c='magenta'
if bool2 :
bool2 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 5 a 20, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
if clef[1] == 180 :
c='green'
if bool3 :
bool3 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 20 a 180, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
if clef[1] == 2000 :
c='blue'
if bool4 :
bool4 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 180 a 2000, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
bool1 = True
bool2 = True
bool3 = True
bool4 = True
for clef in liste2 :
if clef[1] == 5 :
c='red'
if bool1 :
bool1 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 0.8 a 5, avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
if clef[1] == 20 :
c='magenta'
if bool2 :
bool2 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 5 a 20 avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
if clef[1] == 180 :
c='green'
if bool3 :
bool3 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 20 a 180 avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
if clef[1] == 2000 :
c='blue'
if bool4 :
bool4 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 180 a 2000 avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
plt.legend()
plt.title('pentes en fonction de {}'.format(nom),fontsize=40)
plt.xlabel("{}".format(nom), fontsize=40)
plt.ylabel("slope", fontsize=40)
yticks(fontsize=40)
xticks(fontsize=40)
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\pentesF{}.png'.format(nom))
def penteSans(a) :
noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
nom = noms[a]
dicoTotal = effacer(s)
liste1 = list()
liste2 = list()
for clef in dicoTotal :
try :
if dicoTotal[clef][32] == 1 :
liste1 = liste1 + [clef]
elif dicoTotal[clef][32] ==2 :
liste2 = liste2 + [clef]
except :
pass
plt.figure(figsize=(30,20))
bool1 = True
bool2 = True
bool3 = True
bool4 = True
for clef in liste1 :
if clef[1] == 5 :
c='red'
if bool1 :
bool1 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 0.8 a 5, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33],))
if clef[1] == 20 :
c='magenta'
if bool2 :
bool2 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 5 a 20, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
if clef[1] == 180 :
c='green'
if bool3 :
bool3 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 20 a 180, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
if clef[1] == 2000 :
c='blue'
if bool4 :
bool4 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 180 a 2000, queue')
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
bool1 = True
bool2 = True
bool3 = True
bool4 = True
for clef in liste2 :
if clef[1] == 5 :
c='red'
if bool1 :
bool1 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 0.8 a 5, avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
if clef[1] == 20 :
c='magenta'
if bool2 :
bool2 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 5 a 20 avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
if clef[1] == 180 :
c='green'
if bool3 :
bool3 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 20 a 180 avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
if clef[1] == 2000 :
c='blue'
if bool4 :
bool4 = False
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120,label='taille 180 a 2000 avant derniere pente')
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
else :
scatter(dicoTotal[clef][a],dicoTotal[clef][33],color=c,s=120)
scatter(dicoTotal[clef][a],dicoTotal[clef][34],color=c,s=120)
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][33]))
annotate(clef[0],(dicoTotal[clef][a],dicoTotal[clef][34]))
plt.legend()
plt.title('pentes en fonction de {}'.format(nom),fontsize=40)
plt.xlabel("{}".format(nom), fontsize=40)
plt.ylabel("slope", fontsize=40)
yticks(fontsize=40)
xticks(fontsize=40)
savefig('C:\\Users\\valentin\Desktop\\boulot_mines\\S3Recherche\\Python\\correlation\\pentesF{}.png'.format(nom))
def pentesAvec() :
indice = 3
while indice < 32 :
pente(indice)
indice = indice + 1
def pentesSans() :
indice = 3
while indice < 32 :
penteSans(indice)
indice = indice + 1
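# effacer(s): reloads the full dictionary via main() and drops fitted slopes whose
# weight (poids1/poids2) exceeds the threshold s, updating the slope count, slope,
# length and weight fields accordingly; entries that cannot be processed are left as-is.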
def effacer(s) :
noms = {0 : 'echantillon',1 : 'u',2:'v',3:'Okubo Weiss',4: 'Lyapunov exp',5 : 'SST_adv',6: 'SST_AMSRE',7 : 'grad_SST_adv',8: 'age_from_bathy',9: 'lon_from_bathy',10 : 'Shannon_Darwin_mean_all',
11: 'Shannon_Darwin_month_all',12 :'Shannon_Darwin_mean_grp',13: 'Shannon_Darwin_month_grp',14 :'Shannon_Darwin_physat_month',15 : 'Shannon_Darwin_physat_mean',16 :'retention', 17 : 'lon_origin_45d',
18: 'lat_origin_45d',19 :'physat_type',20 : 'abundance',21: 'richness',22 : 'Shannon',23: 'Simpson',24 :'log(alpha)',25 : 'Jevenness',26 :'S.obs',27 :'S.chao1',28 : 'se.chao1',
29 :'S.ACE',30 :'se.ACE',31 : 'distance a la loi de puissance', 32 :'nb de pentes',33 :'pente1',34: 'pente2',35 : 'long1',36 :'long2', 37 : 'poids1', 38 : 'poids2'}
dicoTotal = main()
for clef in dicoTotal :
try :
if dicoTotal[clef][32] == 1:
if dicoTotal[clef][37] > s :
dicoTotal[clef][32]=0
elif dicoTotal[clef][32] == 2 :
if dicoTotal[clef][37] > s :
if dicoTotal[clef][38] > s :
dicoTotal[clef][32] = 0
else :
dicoTotal[clef][32] = 1
dicoTotal[clef][33] = dicoTotal[clef][34]
dicoTotal[clef][37]=dicoTotal[clef][38]
dicoTotal[clef][35]=dicoTotal[clef][36]
if dicoTotal[clef][38] >s and dicoTotal[clef][37] <s :
dicoTotal[clef][32] = 1
except :
pass
return(dicoTotal)
def enlever(s) :
res = str()
for l in s :
if l ==' ' :
pass
else :
res=res + l
return(res)
if __name__ == "__main__" :
pentes()
| gpl-2.0 |
bthirion/scikit-learn | examples/gaussian_process/plot_gpc.py | 103 | 3927 | """
====================================================================
Probabilistic predictions with Gaussian process classification (GPC)
====================================================================
This example illustrates the predicted probability of GPC for an RBF kernel
with different choices of the hyperparameters. The first figure shows the
predicted probability of GPC with arbitrarily chosen hyperparameters and with
the hyperparameters corresponding to the maximum log-marginal-likelihood (LML).
While the hyperparameters chosen by optimizing LML have a considerably larger
LML, they perform slightly worse according to the log-loss on test data. The
figure shows that this is because they exhibit a steep change of the class
probabilities at the class boundaries (which is good) but have predicted
probabilities close to 0.5 far away from the class boundaries (which is bad).
This undesirable effect is caused by the Laplace approximation used
internally by GPC.
The second figure shows the log-marginal-likelihood for different choices of
the kernel's hyperparameters, highlighting the two choices of the
hyperparameters used in the first figure by black dots.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics.classification import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)
# Specify Gaussian Processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])
print("Log Marginal Likelihood (initial): %.3f"
% gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
% gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
% (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
% (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))
# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c='k', label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c='g', label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], 'r',
label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], 'b',
label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
plt.ylim(-0.25, 1.5)
plt.legend(loc="best")
# Plot LML landscape
plt.figure(1)
theta0 = np.logspace(0, 8, 30)
theta1 = np.logspace(-1, 1, 29)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
plt.plot(np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1],
'ko', zorder=10)
plt.plot(np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1],
'ko', zorder=10)
plt.pcolor(Theta0, Theta1, LML)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("Magnitude")
plt.ylabel("Length-scale")
plt.title("Log-marginal-likelihood")
plt.show()
| bsd-3-clause |
ejeschke/ginga | ginga/tests/test_cmap.py | 3 | 2925 | """Unit Tests for the cmap.py functions"""
import numpy as np
import pytest
import ginga.cmap
from ginga.cmap import ColorMap
class TestCmap(object):
def setup_class(self):
pass
def test_ColorMap_init(self):
test_clst = tuple([(x, x, x)
for x in np.linspace(0, 1, ginga.cmap.min_cmap_len)])
test_color_map = ColorMap('test-name', test_clst)
expected = 'test-name'
actual = test_color_map.name
assert expected == actual
expected = ginga.cmap.min_cmap_len
actual = len(test_color_map.clst)
assert expected == actual
expected = (0.0, 0.0, 0.0)
actual = test_color_map.clst[0]
assert np.allclose(expected, actual)
expected = (1.0, 1.0, 1.0)
actual = test_color_map.clst[-1]
assert np.allclose(expected, actual)
def test_ColorMap_init_exception(self):
with pytest.raises(TypeError):
ColorMap('test-name')
def test_cmaps(self):
count = 0
for attribute_name in dir(ginga.cmap):
if attribute_name.startswith('cmap_'):
count = count + 1
expected = count
actual = len(ginga.cmap.cmaps) # Can include matplotlib colormaps
assert expected <= actual
def test_add_cmap(self):
test_clst = tuple([(x, x, x)
for x in np.linspace(0, 1, ginga.cmap.min_cmap_len)])
ginga.cmap.add_cmap('test-name', test_clst)
expected = ColorMap('test-name', test_clst)
actual = ginga.cmap.cmaps['test-name']
assert expected.name == actual.name
assert expected.clst == actual.clst
# Teardown
del ginga.cmap.cmaps['test-name']
def test_add_cmap_exception(self):
test_clst = ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))
with pytest.raises(AssertionError):
ginga.cmap.add_cmap('test-name', test_clst)
def test_get_cmap(self):
test_clst = tuple([(x, x, x)
for x in np.linspace(0, 1, ginga.cmap.min_cmap_len)])
ginga.cmap.add_cmap('test-name', test_clst)
expected = ColorMap('test-name', test_clst)
actual = ginga.cmap.get_cmap('test-name')
assert expected.name == actual.name
assert expected.clst == actual.clst
# Teardown
del ginga.cmap.cmaps['test-name']
def test_get_cmap_exception(self):
with pytest.raises(KeyError):
ginga.cmap.get_cmap('non-existent-name')
def test_get_names(self):
names = []
for attribute_name in dir(ginga.cmap):
if attribute_name.startswith('cmap_'):
names.append(attribute_name[5:])
expected = set(names)
actual = set(ginga.cmap.get_names()) # Can include matplotlib names
assert expected <= actual
# TODO: Add tests for matplotlib functions
# END
| bsd-3-clause |
datasciencebr/serenata-de-amor | research/src/fetch_tse_data.py | 2 | 9446 | """
This script downloads and formats some data from the TSE website.
The first objective with this data is to obtain a list of all politicians in Brazil.
In March 2017, the data available on the TSE website contained information about elected people from the years 1994 to 2016.
Data before 1994 does not contain the names of the politicians.
Further, TSE informs that data from 1994 to 2002 is inconsistent and that they are working on it.
The data is available in CSV format: one CSV file per state, grouped in one zip file per year.
Some of the CSV files from TSE contain headers.
Unfortunately, this is not the case for the files we are dealing with here.
For different years there are different numbers of columns and, consequently, different headers.
In this script, after downloading the files, we appropriately name the columns and select a useful subsample of columns to export for future use in the Serenata Project.
"""
import pandas as pd
import numpy as np
import os
import urllib
import zipfile
import glob
from tempfile import mkdtemp
TEMP_PATH = mkdtemp()
FILENAME_PREFIX = 'consulta_cand_'
TSE_CANDIDATES_URL = 'http://agencia.tse.jus.br/estatistica/sead/odsele/consulta_cand/'
TODAY = pd.datetime.today().date()
OUTPUT_FILENAME = TODAY.isoformat() + '-tse-candidates.xz'
OUTPUT_DATASET_PATH = os.path.join('data', OUTPUT_FILENAME)
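# For example, if the script is run on 2017-03-15, the output file would be
# 'data/2017-03-15-tse-candidates.xz' (the date is taken from TODAY above).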
# setting year range from 2004 up to now. this will be modified further to
# include years from 1994 to 2002
year_list = [str(year) for year in (range(2004, TODAY.year + 1, 2))]
# Download files
for year in year_list:
filename = '{}{}.zip'.format(FILENAME_PREFIX, year)
file_url = TSE_CANDIDATES_URL + filename
output_file = os.path.join(TEMP_PATH, filename)
urllib.request.urlretrieve(file_url, output_file)
# Unzip downloaded files
for year in year_list:
filename = FILENAME_PREFIX + year + '.zip'
filepath = os.path.join(TEMP_PATH, filename)
zip_ref = zipfile.ZipFile(filepath, 'r')
zip_ref.extractall(TEMP_PATH)
zip_ref.close()
# ### Adding the headers
# The following headers were extracted from LEIAME.pdf in consulta_cand_2016.zip.
# headers commented with (*) can be used in the future to integrate with
# other TSE datasets
header_consulta_cand_till2010 = [
"DATA_GERACAO",
"HORA_GERACAO",
"ANO_ELEICAO",
"NUM_TURNO", # (*)
"DESCRICAO_ELEICAO", # (*)
"SIGLA_UF",
"SIGLA_UE", # (*)
"DESCRICAO_UE",
"CODIGO_CARGO", # (*)
"DESCRICAO_CARGO",
"NOME_CANDIDATO",
"SEQUENCIAL_CANDIDATO", # (*)
"NUMERO_CANDIDATO",
"CPF_CANDIDATO",
"NOME_URNA_CANDIDATO",
"COD_SITUACAO_CANDIDATURA",
"DES_SITUACAO_CANDIDATURA",
"NUMERO_PARTIDO",
"SIGLA_PARTIDO",
"NOME_PARTIDO",
"CODIGO_LEGENDA",
"SIGLA_LEGENDA",
"COMPOSICAO_LEGENDA",
"NOME_LEGENDA",
"CODIGO_OCUPACAO",
"DESCRICAO_OCUPACAO",
"DATA_NASCIMENTO",
"NUM_TITULO_ELEITORAL_CANDIDATO",
"IDADE_DATA_ELEICAO",
"CODIGO_SEXO",
"DESCRICAO_SEXO",
"COD_GRAU_INSTRUCAO",
"DESCRICAO_GRAU_INSTRUCAO",
"CODIGO_ESTADO_CIVIL",
"DESCRICAO_ESTADO_CIVIL",
"CODIGO_NACIONALIDADE",
"DESCRICAO_NACIONALIDADE",
"SIGLA_UF_NASCIMENTO",
"CODIGO_MUNICIPIO_NASCIMENTO",
"NOME_MUNICIPIO_NASCIMENTO",
"DESPESA_MAX_CAMPANHA",
"COD_SIT_TOT_TURNO",
"DESC_SIT_TOT_TURNO",
]
header_consulta_cand_at2012 = [
"DATA_GERACAO",
"HORA_GERACAO",
"ANO_ELEICAO",
"NUM_TURNO", # (*)
"DESCRICAO_ELEICAO", # (*)
"SIGLA_UF",
"SIGLA_UE", # (*)
"DESCRICAO_UE",
"CODIGO_CARGO", # (*)
"DESCRICAO_CARGO",
"NOME_CANDIDATO",
"SEQUENCIAL_CANDIDATO", # (*)
"NUMERO_CANDIDATO",
"CPF_CANDIDATO",
"NOME_URNA_CANDIDATO",
"COD_SITUACAO_CANDIDATURA",
"DES_SITUACAO_CANDIDATURA",
"NUMERO_PARTIDO",
"SIGLA_PARTIDO",
"NOME_PARTIDO",
"CODIGO_LEGENDA",
"SIGLA_LEGENDA",
"COMPOSICAO_LEGENDA",
"NOME_LEGENDA",
"CODIGO_OCUPACAO",
"DESCRICAO_OCUPACAO",
"DATA_NASCIMENTO",
"NUM_TITULO_ELEITORAL_CANDIDATO",
"IDADE_DATA_ELEICAO",
"CODIGO_SEXO",
"DESCRICAO_SEXO",
"COD_GRAU_INSTRUCAO",
"DESCRICAO_GRAU_INSTRUCAO",
"CODIGO_ESTADO_CIVIL",
"DESCRICAO_ESTADO_CIVIL",
"CODIGO_NACIONALIDADE",
"DESCRICAO_NACIONALIDADE",
"SIGLA_UF_NASCIMENTO",
"CODIGO_MUNICIPIO_NASCIMENTO",
"NOME_MUNICIPIO_NASCIMENTO",
"DESPESA_MAX_CAMPANHA",
"COD_SIT_TOT_TURNO",
"DESC_SIT_TOT_TURNO",
"NM_EMAIL",
]
header_consulta_cand_from2014 = [
"DATA_GERACAO",
"HORA_GERACAO",
"ANO_ELEICAO",
"NUM_TURNO", # (*)
"DESCRICAO_ELEICAO", # (*)
"SIGLA_UF",
"SIGLA_UE", # (*)
"DESCRICAO_UE",
"CODIGO_CARGO", # (*)
"DESCRICAO_CARGO",
"NOME_CANDIDATO",
"SEQUENCIAL_CANDIDATO", # (*)
"NUMERO_CANDIDATO",
"CPF_CANDIDATO",
"NOME_URNA_CANDIDATO",
"COD_SITUACAO_CANDIDATURA",
"DES_SITUACAO_CANDIDATURA",
"NUMERO_PARTIDO",
"SIGLA_PARTIDO",
"NOME_PARTIDO",
"CODIGO_LEGENDA",
"SIGLA_LEGENDA",
"COMPOSICAO_LEGENDA",
"NOME_LEGENDA",
"CODIGO_OCUPACAO",
"DESCRICAO_OCUPACAO",
"DATA_NASCIMENTO",
"NUM_TITULO_ELEITORAL_CANDIDATO",
"IDADE_DATA_ELEICAO",
"CODIGO_SEXO",
"DESCRICAO_SEXO",
"COD_GRAU_INSTRUCAO",
"DESCRICAO_GRAU_INSTRUCAO",
"CODIGO_ESTADO_CIVIL",
"DESCRICAO_ESTADO_CIVIL",
"CODIGO_COR_RACA",
"DESCRICAO_COR_RACA",
"CODIGO_NACIONALIDADE",
"DESCRICAO_NACIONALIDADE",
"SIGLA_UF_NASCIMENTO",
"CODIGO_MUNICIPIO_NASCIMENTO",
"NOME_MUNICIPIO_NASCIMENTO",
"DESPESA_MAX_CAMPANHA",
"COD_SIT_TOT_TURNO",
"DESC_SIT_TOT_TURNO",
"NM_EMAIL",
]
sel_columns = [
"ANO_ELEICAO",
"NUM_TURNO", # (*)
"DESCRICAO_ELEICAO", # (*)
"SIGLA_UF",
"DESCRICAO_UE",
"DESCRICAO_CARGO",
"NOME_CANDIDATO",
"SEQUENCIAL_CANDIDATO", # (*)
"CPF_CANDIDATO",
"NUM_TITULO_ELEITORAL_CANDIDATO",
"DESC_SIT_TOT_TURNO",
]
# Concatenate all files in one pandas dataframe
cand_df = pd.DataFrame()
for year in year_list:
filesname = FILENAME_PREFIX + year + '*.txt'
filespath = os.path.join(TEMP_PATH, filesname)
files_of_the_year = sorted(glob.glob(filespath))
for file_i in files_of_the_year:
# the following cases do not take into account next elections.
# hopefully, TSE will add headers to the files
if ('2014' in file_i) or ('2016' in file_i):
cand_df_i = pd.read_csv(
file_i,
sep=';',
header=None,
dtype=np.str,
names=header_consulta_cand_from2014,
encoding='iso-8859-1')
elif ('2012' in file_i):
cand_df_i = pd.read_csv(
file_i,
sep=';',
header=None,
dtype=np.str,
names=header_consulta_cand_at2012,
encoding='iso-8859-1')
else:
cand_df_i = pd.read_csv(
file_i,
sep=';',
header=None,
dtype=np.str,
names=header_consulta_cand_till2010,
encoding='iso-8859-1')
cand_df = cand_df.append(cand_df_i[sel_columns])
# this index contains no useful information
cand_df.index = cand_df.reset_index().index
# Translation
headers_translation = {
'ANO_ELEICAO': 'year',
'NUM_TURNO': 'phase', # first round or runoff
'DESCRICAO_ELEICAO': 'description',
'SIGLA_UF': 'state',
'DESCRICAO_UE': 'location',
'DESCRICAO_CARGO': 'post',
'NOME_CANDIDATO': 'name',
# This is not to be used as unique identifier
'SEQUENCIAL_CANDIDATO': 'electoral_id',
'CPF_CANDIDATO': 'cpf',
'NUM_TITULO_ELEITORAL_CANDIDATO': 'voter_id',
'DESC_SIT_TOT_TURNO': 'result',
}
post_translation = {
'VEREADOR': 'city_councilman',
'VICE-PREFEITO': 'vice_mayor',
'PREFEITO': 'mayor',
'DEPUTADO ESTADUAL': 'state_deputy',
'DEPUTADO FEDERAL': 'federal_deputy',
'DEPUTADO DISTRITAL': 'district_deputy',
'SENADOR': 'senator',
'VICE-GOVERNADOR': 'vice_governor',
'GOVERNADOR': 'governor',
'2º SUPLENTE SENADOR': 'senate_second_alternate',
'1º SUPLENTE SENADO': 'senate_first_alternate',
'2º SUPLENTE': 'senate_second_alternate',
'1º SUPLENTE': 'senate_first_alternate',
'VICE-PRESIDENTE': 'vice_president',
'PRESIDENTE': 'president',
}
result_translation = {
'SUPLENTE': 'alternate',
'NÃO ELEITO': 'not_elected',
'#NULO#': 'null',
'ELEITO': 'elected',
'ELEITO POR QP': 'elected_by_party_quota',
'MÉDIA': 'elected',
'ELEITO POR MÉDIA': 'elected',
'#NE#': 'null',
'REGISTRO NEGADO ANTES DA ELEIÇÃO': 'rejected',
'INDEFERIDO COM RECURSO': 'rejected',
'RENÚNCIA/FALECIMENTO/CASSAÇÃO ANTES DA ELEIÇÃO': 'rejected',
'2º TURNO': 'runoff',
'SUBSTITUÍDO': 'replaced',
'REGISTRO NEGADO APÓS A ELEIÇÃO': 'rejected',
'RENÚNCIA/FALECIMENTO COM SUBSTITUIÇÃO': 'replaced',
'RENÚNCIA/FALECIMENTO/CASSAÇÃO APÓS A ELEIÇÃO': 'rejected',
'CASSADO COM RECURSO': 'rejected',
}
cand_df = cand_df.rename(columns=headers_translation)
cand_df.post = cand_df.post.map(post_translation)
cand_df.result = cand_df.result.map(result_translation)
# Exporting data
cand_df.to_csv(
OUTPUT_DATASET_PATH,
encoding='utf-8',
compression='xz',
header=True,
index=False)
| mit |
sinhrks/scikit-learn | examples/model_selection/grid_search_digits.py | 44 | 2672 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
sudikrt/costproML | testproject/temp/test.py | 1 | 3011 | '''
Import Libs
'''
import pandas as pd
import numpy as np
from pandas.tools.plotting import scatter_matrix
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from math import sin, cos, sqrt, atan2, radians
def finddist(lat1, lon1, lat2, lon2):
    # Haversine great-circle distance between two (lat, lon) points given in degrees.
    # R is an approximate Earth radius in km (assumed value; not defined in the original).
    R = 6373.0
    lat1, lon1, lat2, lon2 = map(radians, (lat1, lon1, lat2, lon2))
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    distance = R * c
    return distance
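# Illustrative usage (coordinates are assumed example values, not from the original script):
# finddist(52.2296756, 21.0122287, 52.406374, 16.9251681) should return roughly 278 km.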
#Read Data
dataset = pd.read_csv("damnm.csv", dtype={'supplydemand':'int','cost':'int'})
#print shape
print dataset.shape
#description
print dataset.describe()
#class
print (dataset.groupby('job').size())
print dataset.columns
dataset['lat'] = dataset['lat'].apply(lambda x: str(x))
dataset['lng'] = dataset['lng'].apply(lambda x: str(x))
#dataset['id'] = dataset['id'].apply(pd.to_numeric)
dataset['id'] = dataset['id'].apply(lambda x: int(x))
dataset['cost'] = dataset['cost'].apply(lambda x: int(x))
print dataset.dtypes
'''
print dataset.describe()
columns = dataset.columns.tolist()
job="Tester"
radius=10
df_ = pd.DataFrame()
for index, row in dataset.iterrows():
if (row["job"] == job):
df_.append(np.array(row))
print df_
columns = dataset.columns.tolist()
columns = [c for c in columns if c not in ["job", "place", "cost"]]
target = "cost"
train = dataset.sample(frac = 0.8, random_state = 1)
test = dataset.loc[~dataset.index.isin(train.index)]
print(train.shape)
print(test.shape)
model = LinearRegression()
model.fit(train[columns], train[target])
predictions = model.predict(test[columns])
print test["cost"]
print predictions
print 'Error rate', mean_squared_error(predictions, test[target])
'''
array = dataset.values
X = array[:,3:6]
Y = array[:,6]
print X
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
knn = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
#knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| apache-2.0 |
bearicc/3d-soil-vis | convert.py | 1 | 1196 | import scipy as sp
import numpy as np
import pandas as pd
from pyproj import Proj
import os
filename = ["data/05044572.xyzi",
"data/05044574.xyzi",
"data/05064572.xyzi",
"data/05064574.xyzi",
]
data_list = []
for i in range(len(filename)):
print("Load data "+filename[i]+" ...")
data_list.append(pd.read_csv(filename[i],delimiter=",",names=["x","y","z","i"]).values)
# convert UTM NAD83 Zone 15N to Lat/Lon
p = Proj(init='epsg:26915')
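# Illustrative: p(easting, northing, inverse=True) returns (lon, lat) in degrees for
# EPSG:26915 (NAD83 / UTM zone 15N); the zone's central meridian (93 deg W) corresponds
# to the false easting of 500000 m.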
data_write_list = []
for i in range(len(filename)):
print("Convert data "+filename[i]+" ...")
lon,lat = p(data_list[i][:,0],data_list[i][:,1],inverse=True)
data_write = sp.zeros((data_list[i].shape[0],3))
data_write[:,0] = lon
data_write[:,1] = lat
data_write[:,2] = data_list[i][:,2]
#data_write[:,3] = data_list[i][:,3]
data_write_list.append(data_write)
#sp.save(os.path.splitext(filename[i])[0],data_write)
#sp.savetxt(os.path.splitext(filename[i])[0]+".llzi",data_write,fmt="%.8f",delimiter=",")
data_write = data_write_list[0]
for i in range(1,len(filename)):
data_write = sp.concatenate((data_write,data_write_list[i]),axis=0)
sp.save("data/data",data_write)
| gpl-3.0 |
shusenl/scikit-learn | sklearn/neighbors/nearest_centroid.py | 199 | 7249 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroids for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class are minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
# If X is sparse and the metric is "manhattan", store it in a csc
# format is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
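# Illustrative note (not part of the original module): passing shrink_threshold, e.g.
# NearestCentroid(shrink_threshold=0.2), enables the shrunken-centroid behaviour in fit(),
# where larger thresholds zero out more of the per-feature centroid deviations.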
| bsd-3-clause |
jcrudy/sklearntools | sklearntools/feature_selection.py | 1 | 15903 | import numpy as np
from sklearn.base import MetaEstimatorMixin, is_classifier, clone,\
TransformerMixin
from .sklearntools import STSimpleEstimator, _fit_and_score, DelegatingEstimator,\
BaseDelegatingEstimator, standard_methods
from sklearn.cross_validation import check_cv
from sklearn.metrics.scorer import check_scoring
from sklearn.externals.joblib.parallel import Parallel, delayed
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils import safe_mask
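# Combines per-fold (score, weight, ...) tuples into a single weighted-average score;
# used as the default score_combiner by check_score_combiner below.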
def weighted_average_score_combine(scores):
scores_arr = np.array([tup[:2] for tup in scores])
return np.average(scores_arr[:,0], weights=scores_arr[:,1])
def check_score_combiner(estimator, score_combiner):
if score_combiner is None:
return weighted_average_score_combine
else:
raise NotImplementedError('Score combiner %s not implemented' % str(score_combiner))
# TODO: Remove all CV stuff from this. Instead, rely on composition with CrossValidatingEstimator
class BaseFeatureImportanceEstimatorCV(BaseDelegatingEstimator):
def __init__(self, estimator, cv=None, scoring=None,
score_combiner=None, n_jobs=1, verbose=0, pre_dispatch='2*n_jobs'):
self.estimator = estimator
self.cv = cv
self.scoring = scoring
# self.check_constant_model = check_constant_model
self.score_combiner = score_combiner
self.n_jobs = n_jobs
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self._create_delegates('estimator', standard_methods)
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y=None, sample_weight=None, exposure=None):
cv = check_cv(self.cv, X=X, y=y, classifier=is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
combiner = check_score_combiner(self.estimator, self.score_combiner)
parallel = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch)
n_features = X.shape[1]
data = self._process_args(X=X, y=y, sample_weight=sample_weight, exposure=exposure)
feature_deletion_scores = []
# Get cross-validated scores with all features present
data_ = data.copy()
col_X = self._baseline_feature_subset(X, n_features)
data['X'] = col_X
full_scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
train, test)
for train, test in cv)
self.score_ = combiner(full_scores)
# For each feature, remove that feature and get the cross-validation scores
for col in range(n_features):
col_X = self._feature_subset(X, n_features, col)
data_ = data.copy()
data_['X'] = col_X
scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
train, test)
for train, test in cv)
# test_features = np.ones(shape=n_features, dtype=bool)
# if col_X is not None:
# data_ = data.copy()
# data_['X'] = col_X
# scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
# train, test)
# for train, test in cv)
#
#
# if n_features > 1:
# test_features[col] = False
# data_['X'] = X[:, test_features]
# scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
# train, test)
# for train, test in cv)
# elif self.check_constant_model:
# # If there's only one feature to begin with, do the fitting and scoring on a
# # constant predictor.
# data_['X'] = np.ones(shape=(X.shape[0], 1))
# scores = parallel(delayed(_fit_and_score)(clone(self.estimator), data_, scorer,
# train, test)
# for train, test in cv)
# else:
# scores = full_scores
score = combiner(scores)
feature_deletion_scores.append(score)
# Scorers return "higher is better" values, and feature_importances_ should also be
# "higher means more important"; _calc_importances reconciles the two conventions.
self.feature_importances_ = self._calc_importances(np.array(feature_deletion_scores), self.score_)
# Finally, fit on the full data set
self.estimator_ = clone(self.estimator).fit(**data)
# A fit method should always return self for chaining purposes
return self
def predict(self, X, *args, **kwargs):
return self.estimator_.predict(X, *args, **kwargs)
def score(self, X, y, sample_weight=None):
scorer = check_scoring(self.estimator, scoring=self.scoring)
return scorer(self, X, y, sample_weight)
class SingleEliminationFeatureImportanceEstimatorCV(BaseFeatureImportanceEstimatorCV):
def _calc_importances(self, scores, baseline_score):
return baseline_score - scores
def _baseline_feature_subset(self, X, n_features):
return X
def _feature_subset(self, X, n_features, col):
if n_features > 1:
mask = np.ones(shape=n_features, dtype=bool)
mask[col] = False
return X[:, mask]
else:
return np.ones(shape=(X.shape[0], 1))
class UnivariateFeatureImportanceEstimatorCV(BaseFeatureImportanceEstimatorCV):
def _calc_importances(self, scores, baseline_score):
return scores
def _baseline_feature_subset(self, X, n_features):
return X
def _feature_subset(self, X, n_features, col):
mask = np.zeros(shape=n_features, dtype=bool)
mask[col] = True
return X[:, mask]
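# Summary of the two subclasses above (descriptive comment, added for clarity):
# - SingleEliminationFeatureImportanceEstimatorCV scores a feature by how much the
#   cross-validated score drops when that single feature is removed (baseline - score).
# - UnivariateFeatureImportanceEstimatorCV scores a feature by the cross-validated
#   score of a model fitted on that feature alone.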
class STSelector(STSimpleEstimator, SelectorMixin, MetaEstimatorMixin, TransformerMixin):
# Override transform method from SelectorMixin because it doesn't handle the
# case of selecting zero features the way I want it to.
def transform(self, X, exposure=None):
"""Reduce X to the selected features.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
X_r : array of shape [n_samples, n_selected_features]
The input samples with only the selected features.
"""
mask = self.get_support()
if not mask.any():
return np.ones(shape=(X.shape[0], 1))
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return X[:, safe_mask(X, mask)]
@if_delegate_has_method(delegate='estimator')
def predict(self, X, exposure=None):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.predict(**args)
@if_delegate_has_method(delegate='estimator')
def score(self, X, y=None, sample_weight=None, exposure=None):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
args = self._process_args(X=X, y=y, sample_weight=sample_weight,
exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.score(**args)
def _get_support_mask(self):
return self.support_
@property
def _estimator_type(self):
return self.estimator._estimator_type
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X, exposure=None):
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.decision_function(**args)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X, exposure=None):
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.predict_proba(**args)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X, exposure=None):
args = self._process_args(X=X, exposure=exposure)
args['X'] = self.transform(args['X'])
return self.estimator_.predict_log_proba(**args)
# class ForwardStepwiseFeatureSelector(STSelector, MetaEstimatorMixin):
# def __init__(self, estimator, scoring=None, check_constant_model=True):
# self.estimator = estimator
# self.scoring = scoring
# self.check_constant_model = check_constant_model
#
# def fit(self, X, y, sample_weight=None, exposure=None):
# scorer = check_scoring(self.estimator, scoring=self.scoring)
# n_features = 0 if self.check_constant_model else 1
# args = self._process_args(X=X, y=y, sample_weight=sample_weight,
# exposure=exposure)
#
# support = np.zeros(shape=n_features, dtype=bool)
# best_score = -float('inf')
# best_support = None
# best_n_features = None
# sequence = []
# scores = []
#
# if self.check_constant_model:
# args_ = args.copy()
# args_['X'] = np.ones(shape=(X.shape[0],1))
# # Fit the estimator
# estimator = clone(self.estimator).fit(**args)
#
# # Score the estimator
# if self.scoring is None and hasattr(estimator, 'score_'):
# score = estimator.score_
# else:
# score = scorer(estimator, X, y, sample_weight)
#
# # Compare to previous tries
# if score > best_score:
# best_score = score
# best_support = np.zeros_like(support)
# best_n_features = 0
# scores.append(score)
#
# max_features = X.shape[1]
# while np.sum(support) <= max_features:
# args_ = args.copy()
# args_['X'] = X[:, support]
#
# # Fit the estimator
# estimator = clone(self.estimator).fit(**args)
#
# # Score the estimator
# if self.scoring is None and hasattr(estimator, 'score_'):
# score = estimator.score_
# else:
# score = scorer(estimator, X, y, sample_weight)
# scores.append(score)
#
# # Compare to previous tries
# if score > best_score:
# best_score = score
# best_support = support.copy()
# best_n_features = np.sum(support)
#
# # Remove the least important feature from the support for next time
# best_feature = np.argmax(estimator.feature_importances_)
# best_feature_idx = np.argwhere(support)[best_feature][0]
# support[best_feature] = True
# sequence.append(best_feature_idx)
#
class BestKFeatureSelector(STSelector):
def __init__(self, estimator, k):
self.estimator = estimator
self.k = k
def fit(self, X, y=None, sample_weight=None, exposure=None):
args = self._process_args(X=X, y=y, sample_weight=sample_weight,
exposure=exposure)
self.estimator_ = clone(self.estimator).fit(**args)
k_best = np.argsort(self.estimator_.feature_importances_)[::-1][:self.k]
self.support_ = np.zeros(shape=X.shape[1], dtype=bool)
self.support_[k_best] = True
return self
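# Illustrative usage (a sketch; the wrapped estimator name is a placeholder):
# selector = BestKFeatureSelector(
#     SingleEliminationFeatureImportanceEstimatorCV(some_estimator), k=5)
# X_reduced = selector.fit(X, y).transform(X)
# Any wrapped estimator exposing feature_importances_ after fit() will work here.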
class BackwardEliminationEstimator(STSelector):
def __init__(self, estimator, scoring=None, check_constant_model=True):
self.estimator = estimator
self.scoring = scoring
self.check_constant_model = check_constant_model
def fit(self, X, y=None, sample_weight=None, exposure=None):
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
# sample_weight = kwargs.get('sample_weight', None)
# Do stepwise backward elimination to find best feature set
support = np.ones(shape=n_features, dtype=bool)
best_score = -float('inf')
best_support = None
best_n_features = None
elimination = []
scores = []
fit_args = self._process_args(X=X, y=y, sample_weight=sample_weight,
exposure=exposure)
while np.sum(support) >= 1:
# Fit the estimator
args = fit_args.copy()
args['X'] = X[:, support]
estimator = clone(self.estimator).fit(**args)
# Score the estimator
if self.scoring is None and hasattr(estimator, 'score_'):
score = estimator.score_
else:
score = scorer(estimator, **args)
scores.append(score)
# Compare to previous tries
if score > best_score:
best_score = score
best_support = support.copy()
best_n_features = np.sum(support)
# Remove the least important feature from the support for next time
worst_feature = np.argmin(estimator.feature_importances_)
worst_feature_idx = np.argwhere(support)[worst_feature][0]
support[worst_feature_idx] = False
elimination.append(worst_feature_idx)
# Score a constant input model in case it's the best choice.
# (This would mean the predictors are essentially useless.)
if self.check_constant_model:
# Fit the estimator
args = fit_args.copy()
args['X'] = np.ones(shape=(X.shape[0],1))
estimator = clone(self.estimator).fit(**args)
# Score the estimator
if self.scoring is None and hasattr(estimator, 'score_'):
score = estimator.score_
else:
score = scorer(estimator, **args)
# Compare to previous tries
if score > best_score:
best_score = score
best_support = np.zeros_like(support)
best_n_features = 0
scores.append(score)
# Set attributes for best feature set
self.n_input_features_ = n_features
self.n_features_ = best_n_features
self.support_ = best_support
self.elimination_sequence_ = np.array(elimination)
self.scores_ = np.array(scores)
# Finally, fit on the full data set with the selected set of features
args = fit_args.copy()
args['X'] = X[:, self.support_]
self.estimator_ = clone(self.estimator).fit(**args)
return self
| bsd-3-clause |
e-koch/TurbuStat | Examples/paper_plots/test_fBM_delvar_vs_idl.py | 2 | 1776 |
'''
Compare TurbuStat's Delta-variance to the original IDL code.
'''
from turbustat.statistics import DeltaVariance
from turbustat.simulator import make_extended
import astropy.io.fits as fits
from astropy.table import Table
import matplotlib.pyplot as plt
import astropy.units as u
import seaborn as sb
font_scale = 1.25
width = 4.2
# Keep the default ratio used in seaborn. This can get overwritten.
height = (4.4 / 6.4) * width
figsize = (width, height)
sb.set_context("paper", font_scale=font_scale,
rc={"figure.figsize": figsize})
sb.set_palette("colorblind")
col_pal = sb.color_palette()
plt.rcParams['axes.unicode_minus'] = False
size = 256
markers = ['D', 'o']
# Make a single figure example to save space in the paper.
fig = plt.figure(figsize=figsize)
slope = 3.0
test_img = fits.PrimaryHDU(make_extended(size, powerlaw=slope))
# The power-law behaviour continues up to ~1/4 of the size
delvar = DeltaVariance(test_img).run(xlow=3 * u.pix,
xhigh=0.25 * size * u.pix,
boundary='wrap')
plt.xscale("log")
plt.yscale("log")
plt.errorbar(delvar.lags.value, delvar.delta_var,
yerr=delvar.delta_var_error,
fmt=markers[0], label='TurbuStat')
# Now plot the IDL output
tab = Table.read("deltavar_{}.txt".format(slope), format='ascii')
# First is pixel scale, second is delvar, then delvar error, and finally
# the fit values
plt.errorbar(tab['col1'], tab['col2'], yerr=tab['col3'],
fmt=markers[1], label='IDL')
plt.grid()
plt.legend(frameon=True)
plt.ylabel(r"$\Delta$-Variance")
plt.xlabel("Scales (pix)")
plt.tight_layout()
plt.savefig("../figures/delvar_vs_idl.png")
plt.savefig("../figures/delvar_vs_idl.pdf")
plt.close()
| mit |
buzz2vatsal/Deep-Bench | YOLO/retrain_yolo.py | 1 | 12522 | """
This is a script that can be used to retrain the YOLOv2 model for your own dataset.
"""
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yolo.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yolo.utils.draw_boxes import draw_boxes
# Args
argparser = argparse.ArgumentParser(
description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")
argparser.add_argument(
'-d',
'--data_path',
help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
default=os.path.join('..', 'DATA', 'underwater_data.npz'))
argparser.add_argument(
'-a',
'--anchors_path',
help='path to anchors file, defaults to yolo_anchors.txt',
default=os.path.join('model_data', 'yolo_anchors.txt'))
argparser.add_argument(
'-c',
'--classes_path',
help='path to classes file, defaults to pascal_classes.txt',
default=os.path.join('..', 'DATA', 'underwater_classes.txt'))
# Default anchor boxes
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
def _main(args):
data_path = os.path.expanduser(args.data_path)
classes_path = os.path.expanduser(args.classes_path)
anchors_path = os.path.expanduser(args.anchors_path)
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
data = np.load(data_path) # custom data saved as a numpy file.
# has 2 arrays: an object array 'boxes' (variable length of boxes in each image)
# and an array of images 'images'
image_data, boxes = process_data(data['images'], data['boxes'])
anchors = YOLO_ANCHORS
detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)
model_body, model = create_model(anchors, class_names)
train(
model,
class_names,
anchors,
image_data,
boxes,
detectors_mask,
matching_true_boxes
)
draw(model_body,
class_names,
anchors,
image_data,
image_set='val', # assumes training/validation split is 0.9
weights_name='trained_stage_3_best.h5',
save_all=False)
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
if os.path.isfile(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
else:
Warning("Could not open anchors file, using default.")
return YOLO_ANCHORS
def process_data(images, boxes=None):
'''processes the data'''
images = [PIL.Image.fromarray(i) for i in images]
orig_size = np.array([images[0].width, images[0].height])
orig_size = np.expand_dims(orig_size, axis=0)
# Image preprocessing.
processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
processed_images = [np.array(image, dtype=np.float) for image in processed_images]
processed_images = [image/255. for image in processed_images]
if boxes is not None:
# Box preprocessing.
# Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
boxes = [box.reshape((-1, 5)) for box in boxes]
# Get extents as y_min, x_min, y_max, x_max, class for comparision with
# model output.
boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]
# Get box parameters as x_center, y_center, box_width, box_height, class.
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
# find the max number of boxes
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
# add zero pad for training
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
return np.array(processed_images)
def get_detector_mask(boxes, anchors):
'''
Precompute detectors_mask and matching_true_boxes for training.
Detectors mask is 1 for each spatial position in the final conv layer and
anchor that should be active for the given boxes and 0 otherwise.
Matching true boxes gives the regression targets for the ground truth box
that caused a detector to be active or 0 otherwise.
'''
detectors_mask = [0 for i in range(len(boxes))]
matching_true_boxes = [0 for i in range(len(boxes))]
for i, box in enumerate(boxes):
detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(box, anchors, [416, 416])
return np.array(detectors_mask), np.array(matching_true_boxes)
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
'''
returns the body of the model and the model
# Params:
load_pretrained: whether or not to load the pretrained model or initialize all weights
freeze_body: whether or not to freeze all weights except for the last layer's
# Returns:
model_body: YOLOv2 with new output layer
model: YOLOv2 with custom loss Lambda layer
'''
detectors_mask_shape = (13, 13, 5, 1)
matching_boxes_shape = (13, 13, 5, 5)
# Create model input layers.
image_input = Input(shape=(416, 416, 3))
boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=detectors_mask_shape)
matching_boxes_input = Input(shape=matching_boxes_shape)
# Create model body.
yolo_model = yolo_body(image_input, len(anchors), len(class_names))
topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)
if load_pretrained:
# Save topless yolo:
topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
if not os.path.exists(topless_yolo_path):
print("CREATING TOPLESS WEIGHTS FILE")
yolo_path = os.path.join('model_data', 'yolo.h5')
model_body = load_model(yolo_path)
model_body = Model(model_body.inputs, model_body.layers[-2].output)
model_body.save_weights(topless_yolo_path)
topless_yolo.load_weights(topless_yolo_path)
if freeze_body:
for layer in topless_yolo.layers:
layer.trainable = False
final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear')(topless_yolo.output)
model_body = Model(image_input, final_layer)
# Place model loss on CPU to reduce GPU memory usage.
with tf.device('/cpu:0'):
# TODO: Replace Lambda with custom Keras layer for loss.
model_loss = Lambda(
yolo_loss,
output_shape=(1, ),
name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': len(class_names)})([
model_body.output, boxes_input,
detectors_mask_input, matching_boxes_input
])
model = Model(
[model_body.input, boxes_input, detectors_mask_input,
matching_boxes_input], model_loss)
return model_body, model
def train(model, class_names, anchors, image_data, boxes, detectors_mask, matching_true_boxes, validation_split=0.1):
'''
retrain/fine-tune the model
logs training with tensorboard
saves training weights in current directory
best weights according to val_loss is saved as trained_stage_3_best.h5
'''
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
logging = TensorBoard()
checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
save_weights_only=True, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
np.zeros(len(image_data)),
validation_split=validation_split,
batch_size=32,
epochs=5,
callbacks=[logging])
model.save_weights('trained_stage_1.h5')
model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)
model.load_weights('trained_stage_1.h5')
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
np.zeros(len(image_data)),
validation_split=0.1,
batch_size=8,
epochs=30,
callbacks=[logging])
model.save_weights('trained_stage_2.h5')
model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
np.zeros(len(image_data)),
validation_split=0.1,
batch_size=8,
epochs=30,
callbacks=[logging, checkpoint, early_stopping])
model.save_weights('trained_stage_3.h5')
def draw(model_body, class_names, anchors, image_data, image_set='val',
weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
'''
Draw bounding boxes on image data
'''
if image_set == 'train':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[:int(len(image_data)*.9)]])
elif image_set == 'val':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[int(len(image_data)*.9):]])
elif image_set == 'all':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data])
else:
ValueError("draw argument image_set must be 'train', 'val', or 'all'")
# model.load_weights(weights_name)
print(image_data.shape)
model_body.load_weights(weights_name)
# Create output variables for prediction.
yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs, input_image_shape, score_threshold=0.07, iou_threshold=0)
# Run prediction on overfit image.
sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(image_data)):
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
model_body.input: image_data[i],
input_image_shape: [image_data.shape[2], image_data.shape[3]],
K.learning_phase(): 0
})
print('Found {} boxes for image.'.format(len(out_boxes)))
print(out_boxes)
# Plot image with predicted boxes.
image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
class_names, out_scores)
# Save the image:
if save_all or (len(out_boxes) > 0):
image = PIL.Image.fromarray(image_with_boxes)
image.save(os.path.join(out_path,str(i)+'.png'))
# To display (pauses the program):
# plt.imshow(image_with_boxes, interpolation='nearest')
# plt.show()
if __name__ == '__main__':
args = argparser.parse_args()
_main(args)
| cc0-1.0 |
TariqAHassan/ZeitSci | analysis/supplementary_fns.py | 1 | 4393 | import re
import time
from itertools import chain
def pprint(string, n=80):
"""
Pretty print a string, breaking it into chunks of length n.
"""
if not isinstance(string, str):
raise ValueError("Input must be a string!")
if len(string) < n:
print(string)
else:
# see http://stackoverflow.com/questions/9475241/split-python-string-every-nth-character
string_split = [string[i:i + n] for i in range(0, len(string), n)]
for l in string_split:
print(l.lstrip())
def lprint(input_list, tstep=None):
"""
:param input_list:
:return:
"""
if isinstance(input_list, dict):
for k, v in input_list.items():
print(k, " ---> ", v)
if not isinstance(input_list, list) and not isinstance(input_list, dict):
print(input_list)
if len(input_list) == 0:
print("--- Empty List ---")
elif isinstance(input_list, list):
for l in range(len(input_list)):
if isinstance(tstep, int) or isinstance(tstep, float):
time.sleep(tstep)
print(str(l) + ":", input_list[l])
def cln(i, extent=1):
"""
String white space 'cleaner'.
:param i:
:param extent: 1 --> all white space reduced to length 1; 2 --> removal of all white space.
:return:
"""
if isinstance(i, str) and i != "":
if extent == 1:
return re.sub(r"\s\s+", " ", i)
elif extent == 2:
return re.sub(r"\s+", "", i)
else:
return i
# else:
# return None
#
# if es:
# return to_return.lstrip().rstrip()
# else:
# return to_return
def insen_replace(input_str, term, replacement):
"""
Case-insensitive string replace: replaces `term` in `input_str`
regardless of case.
see: http://stackoverflow.com/questions/919056/case-insensitive-replace
:param input_str:
:param term:
:param replacement:
:return:
"""
disp_term = re.compile(re.escape(term), re.IGNORECASE)
return disp_term.sub(replacement, input_str).lstrip().rstrip()
def partial_match(input_str, looking_for):
"""
:param input_str:
:param looking_for:
:return:
"""
if isinstance(input_str, str) and isinstance(looking_for, str):
return cln(looking_for.lower(), 1) in cln(input_str.lower(), 1)
else:
return False
def partial_list_match(input_str, allowed_list):
"""
:param input_str:
:param allowed_list:
:return:
"""
allowed_list = [cln(i.lower(), 1).lstrip().rstrip() for i in allowed_list]
for i in allowed_list:
if partial_match(input_str=input_str, looking_for=i):
return True
return False
def endpoints_str(input, start, end=","):
"""
:param input:
:param start:
:param end:
:return:
"""
try:
return cln(start + input[len(start):-len(end)], 1).lstrip().rstrip()
except Exception:
return None
def pandas_col_shift(data_frame, column, move_to=0):
"""
Please see Sachinmm's StackOverflow answer:
http://stackoverflow.com/questions/25122099/move-column-by-name-to-front-of-table-in-pandas
:param data_frame: a pandas dataframe
:param column: the column to be moved
:param move_to: position in df to move the column to; defaults to 0 (first)
:return:
"""
if not (0 <= move_to <= data_frame.shape[1]):
raise AttributeError("Invalid move_to value.")
if not isinstance(column, str):
raise ValueError("the column was not provided as a string.")
if column not in data_frame.columns.tolist():
raise AttributeError("the dataframe has no column: %s." % (column))
cols = data_frame.columns.tolist()
cols.insert(move_to, cols.pop(cols.index(column)))
return data_frame.reindex(columns=cols)
def items_present_test(input_list, clist):
"""
Check if any of the items in clist are in input_list
:param input_list: a list to look for them in
:param clist: things you're looking for
:return:
"""
return any(x in input_list for x in clist)
def fast_flatten(input_list):
return list(chain.from_iterable(input_list))
def multi_replace(input_str, to_remove):
for tr in to_remove:
input_str = input_str.replace(tr, "")
return input_str
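# Quick usage sketch of a few of the helpers above; assumes pandas is
# installed. The column names and strings below are arbitrary examples.
if __name__ == "__main__":
    import pandas as pd

    # cln() with extent=1 collapses runs of whitespace; extent=2 removes it all.
    print(cln("too   many    spaces", 1))   # -> "too many spaces"
    print(cln("too   many    spaces", 2))   # -> "toomanyspaces"

    # partial_match() is a case-insensitive substring test.
    print(partial_match("University of Geneva", "geneva"))  # -> True

    # pandas_col_shift() moves a named column to a new position.
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
    print(pandas_col_shift(df, "c", move_to=0).columns.tolist())  # -> ['c', 'a', 'b']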
| gpl-3.0 |
malie/theano-rbm-on-word-tuples | read.py | 1 | 1782 | import re
from sklearn.feature_extraction.text import CountVectorizer
def find_common_words(all_words, num_most_frequent_words):
vectorizer = CountVectorizer(
stop_words=None, # 'english',
max_features=num_most_frequent_words,
binary=True)
vectorizer.fit(all_words)
return (vectorizer.vocabulary_, vectorizer.get_feature_names())
def read_odyssey_tuples(tuplesize,
num_most_frequent_words,
verbose=False):
with open('pg1727-part.txt', 'r') as file:
text = re.findall(r'[a-zA-Z]+', file.read())
(common_voc, common_names) = find_common_words(text, num_most_frequent_words)
print(common_voc)
print(common_names)
res = []
dist = 12
for i in range(len(text)-dist):
first_word = text[i]
if first_word in common_voc:
a = common_voc[first_word]
tuple = [a]
for j in range(dist):
next_word = text[i+1+j]
if next_word in common_voc:
n = common_voc[next_word]
tuple.append(n)
if len(tuple) == tuplesize:
res.append(tuple)
if verbose and i < 200:
print(tuple)
print('from ', text[i:i+2+j])
break
return (res, common_names)
if __name__ == "__main__":
num_words = 20
(tuples, words) = read_odyssey_tuples(3, num_words, verbose=True)
print('number of common word tuples: ', len(tuples))
for s in range(10):
for i in tuples[s]:
print(i, words[i])
print('')
ts = set([(a,b,c) for a,b,c in tuples])
print('distinct word tuples: ', len(ts))
| bsd-3-clause |
CallaJun/hackprince | indico/matplotlib/axes/_axes.py | 10 | 260820 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import reduce, xrange, zip, zip_longest
import math
import warnings
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.cbook as cbook
from matplotlib.cbook import _string_to_bool, mplDeprecation
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as _ # <-registers a date unit converter
from matplotlib import docstring
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.markers as mmarkers
import matplotlib.mlab as mlab
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.stackplot as mstack
import matplotlib.streamplot as mstream
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.tri as mtri
import matplotlib.transforms as mtrans
from matplotlib.container import BarContainer, ErrorbarContainer, StemContainer
from matplotlib.axes._base import _AxesBase
from matplotlib.axes._base import _process_plot_format
iterable = cbook.iterable
is_string_like = cbook.is_string_like
is_sequence_of_strings = cbook.is_sequence_of_strings
# The axes module contains all the wrappers to plotting functions.
# All the other methods should go in the _AxesBase class.
class Axes(_AxesBase):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
### Labelling, legend and texts
def get_title(self, loc="center"):
"""Get an axes title.
Get one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
loc : {'center', 'left', 'right'}, str, optional
Which title to get, defaults to 'center'
Returns
-------
title: str
The title text string.
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
return title.get_text()
@docstring.dedent_interpd
def set_title(self, label, fontdict=None, loc="center", **kwargs):
"""
Set a title for the axes.
Set one of the three available axes titles. The available titles
are positioned above the axes in the center, flush with the left
edge, and flush with the right edge.
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is::
{'fontsize': rcParams['axes.titlesize'],
'fontweight' : rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to 'center'
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other parameters
----------------
kwargs : text properties
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
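Examples
--------
A minimal sketch, assuming an axes created with, e.g.,
``fig, ax = plt.subplots()``::

    ax.set_title('Response curve')
    ax.set_title('run 42', loc='right', fontsize=10)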
"""
try:
title = {'left': self._left_title,
'center': self.title,
'right': self._right_title}[loc.lower()]
except KeyError:
raise ValueError("'%s' is not a valid location" % loc)
default = {
'fontsize': rcParams['axes.titlesize'],
'fontweight': rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc.lower()}
title.set_text(label)
title.update(default)
if fontdict is not None:
title.update(fontdict)
title.update(kwargs)
return title
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the xaxis.
Parameters
----------
xlabel : string
x label
labelpad : scalar, optional, default: None
spacing in points between the label and the x-axis
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties
See also
--------
text : for information on how override and the optional args work
"""
if labelpad is not None:
self.xaxis.labelpad = labelpad
return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
@docstring.dedent_interpd
def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
"""
Set the label for the yaxis
Parameters
----------
ylabel : string
y label
labelpad : scalar, optional, default: None
spacing in points between the label and the x-axis
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties
See also
--------
text : for information on how override and the optional args work
"""
if labelpad is not None:
self.yaxis.labelpad = labelpad
return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
def _get_legend_handles(self, legend_handler_map=None):
"""
Return a generator of artists that can be used as handles in
a legend.
"""
handles_original = (self.lines + self.patches +
self.collections + self.containers)
handler_map = mlegend.Legend.get_default_handler_map()
if legend_handler_map is not None:
handler_map = handler_map.copy()
handler_map.update(legend_handler_map)
has_handler = mlegend.Legend.get_legend_handler
for handle in handles_original:
label = handle.get_label()
if label != '_nolegend_' and has_handler(handler_map, handle):
yield handle
def get_legend_handles_labels(self, legend_handler_map=None):
"""
Return handles and labels for legend
``ax.legend()`` is equivalent to ::
h, l = ax.get_legend_handles_labels()
ax.legend(h, l)
"""
handles = []
labels = []
for handle in self._get_legend_handles(legend_handler_map):
label = handle.get_label()
if label and not label.startswith('_'):
handles.append(handle)
labels.append(label)
return handles, labels
def legend(self, *args, **kwargs):
"""
Places a legend on the axes.
To make a legend for lines which already exist on the axes
(via plot for instance), simply call this function with an iterable
of strings, one for each legend item. For example::
ax.plot([1, 2, 3])
ax.legend(['A simple line'])
However, in order to keep the "label" and the legend element
instance together, it is preferable to specify the label either at
artist creation, or by calling the
:meth:`~matplotlib.artist.Artist.set_label` method on the artist::
line, = ax.plot([1, 2, 3], label='Inline label')
# Overwrite the label by calling the method.
line.set_label('Label via method')
ax.legend()
Specific lines can be excluded from the automatic legend element
selection by defining a label starting with an underscore.
This is default for all artists, so calling :meth:`legend` without
any arguments and without setting the labels manually will result in
no legend being drawn.
For full control of which artists have a legend entry, it is possible
to pass an iterable of legend artists followed by an iterable of
legend labels respectively::
legend((line1, line2, line3), ('label1', 'label2', 'label3'))
Parameters
----------
loc : int or string or pair of floats, default: 0
The location of the legend. Possible codes are:
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
Alternatively can be a 2-tuple giving ``x, y`` of the lower-left
corner of the legend in axes coordinates (in which case
``bbox_to_anchor`` will be ignored).
bbox_to_anchor : :class:`matplotlib.transforms.BboxBase` instance \
or tuple of floats
Specify any arbitrary location for the legend in `bbox_transform`
coordinates (default Axes coordinates).
For example, to put the legend's upper right hand corner in the
center of the axes the following keywords can be used::
loc='upper right', bbox_to_anchor=(0.5, 0.5)
ncol : integer
The number of columns that the legend has. Default is 1.
prop : None or :class:`matplotlib.font_manager.FontProperties` or dict
The font properties of the legend. If None (default), the current
:data:`matplotlib.rcParams` will be used.
fontsize : int or float or {'xx-small', 'x-small', 'small', 'medium',\
'large', 'x-large', 'xx-large'}
Controls the font size of the legend. If the value is numeric the
size will be the absolute font size in points. String values are
relative to the current default font size. This argument is only
used if `prop` is not specified.
numpoints : None or int
The number of marker points in the legend when creating a legend
entry for a line/:class:`matplotlib.lines.Line2D`.
Default is ``None`` which will take the value from the
``legend.numpoints`` :data:`rcParam<matplotlib.rcParams>`.
scatterpoints : None or int
The number of marker points in the legend when creating a legend
entry for a scatter plot/
:class:`matplotlib.collections.PathCollection`.
Default is ``None`` which will take the value from the
``legend.scatterpoints`` :data:`rcParam<matplotlib.rcParams>`.
scatteryoffsets : iterable of floats
The vertical offset (relative to the font size) for the markers
created for a scatter plot legend entry. 0.0 is at the base the
legend text, and 1.0 is at the top. To draw all markers at the
same height, set to ``[0.5]``. Default ``[0.375, 0.5, 0.3125]``.
markerscale : None or int or float
The relative size of legend markers compared with the originally
drawn ones. Default is ``None`` which will take the value from
the ``legend.markerscale`` :data:`rcParam <matplotlib.rcParams>`.
frameon : None or bool
Control whether a frame should be drawn around the legend.
Default is ``None`` which will take the value from the
``legend.frameon`` :data:`rcParam<matplotlib.rcParams>`.
fancybox : None or bool
Control whether round edges should be enabled around
the :class:`~matplotlib.patches.FancyBboxPatch` which
makes up the legend's background.
Default is ``None`` which will take the value from the
``legend.fancybox`` :data:`rcParam<matplotlib.rcParams>`.
shadow : None or bool
Control whether to draw a shadow behind the legend.
Default is ``None`` which will take the value from the
``legend.shadow`` :data:`rcParam<matplotlib.rcParams>`.
framealpha : None or float
Control the alpha transparency of the legend's frame.
Default is ``None`` which will take the value from the
``legend.framealpha`` :data:`rcParam<matplotlib.rcParams>`.
mode : {"expand", None}
If `mode` is set to ``"expand"`` the legend will be horizontally
expanded to fill the axes area (or `bbox_to_anchor` if defines
the legend's size).
bbox_transform : None or :class:`matplotlib.transforms.Transform`
The transform for the bounding box (`bbox_to_anchor`). For a value
of ``None`` (default) the Axes'
:data:`~matplotlib.axes.Axes.transAxes` transform will be used.
title : str or None
The legend's title. Default is no title (``None``).
borderpad : float or None
The fractional whitespace inside the legend border.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.borderpad`` :data:`rcParam<matplotlib.rcParams>`.
labelspacing : float or None
The vertical space between the legend entries.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.labelspacing`` :data:`rcParam<matplotlib.rcParams>`.
handlelength : float or None
The length of the legend handles.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.handlelength`` :data:`rcParam<matplotlib.rcParams>`.
handletextpad : float or None
The pad between the legend handle and text.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.handletextpad`` :data:`rcParam<matplotlib.rcParams>`.
borderaxespad : float or None
The pad between the axes and legend border.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.borderaxespad`` :data:`rcParam<matplotlib.rcParams>`.
columnspacing : float or None
The spacing between columns.
Measured in font-size units.
Default is ``None`` which will take the value from the
``legend.columnspacing`` :data:`rcParam<matplotlib.rcParams>`.
handler_map : dict or None
The custom dictionary mapping instances or types to a legend
handler. This `handler_map` updates the default handler map
found at :func:`matplotlib.legend.Legend.get_legend_handler_map`.
Notes
-----
Not all kinds of artist are supported by the legend command.
See :ref:`plotting-guide-legend` for details.
Examples
--------
.. plot:: mpl_examples/api/legend_demo.py
"""
handlers = kwargs.get('handler_map', {}) or {}
# Support handles and labels being passed as keywords.
handles = kwargs.pop('handles', None)
labels = kwargs.pop('labels', None)
if handles is not None and labels is None:
labels = [handle.get_label() for handle in handles]
for label, handle in zip(labels[:], handles[:]):
if label.startswith('_'):
warnings.warn('The handle {!r} has a label of {!r} which '
'cannot be automatically added to the '
'legend.'.format(handle, label))
labels.remove(label)
handles.remove(handle)
elif labels is not None and handles is None:
# Get as many handles as there are labels.
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
# No arguments - automatically detect labels and handles.
elif len(args) == 0:
handles, labels = self.get_legend_handles_labels(handlers)
if not handles:
warnings.warn("No labelled objects found. "
"Use label='...' kwarg on individual plots.")
return None
# One argument. User defined labels - automatic handle detection.
elif len(args) == 1:
labels, = args
# Get as many handles as there are labels.
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
# Two arguments. Either:
# * user defined handles and labels
# * user defined labels and location (deprecated)
elif len(args) == 2:
if is_string_like(args[1]) or isinstance(args[1], int):
cbook.warn_deprecated('1.4', 'The "loc" positional argument '
'to legend is deprecated. Please use '
'the "loc" keyword instead.')
labels, loc = args
handles = [handle for handle, _
in zip(self._get_legend_handles(handlers), labels)]
kwargs['loc'] = loc
else:
handles, labels = args
# Three arguments. User defined handles, labels and
# location (deprecated).
elif len(args) == 3:
cbook.warn_deprecated('1.4', 'The "loc" positional argument '
'to legend is deprecated. Please '
'use the "loc" keyword instead.')
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend.')
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
self.legend_._remove_method = lambda h: setattr(self, 'legend_', None)
return self.legend_
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
Add text to the axes.
Add text in string `s` to axis at location `x`, `y`, data
coordinates.
Parameters
----------
x, y : scalars
data coordinates
s : string
text
fontdict : dictionary, optional, default: None
A dictionary to override the default text properties. If fontdict
is None, the defaults are determined by your rc parameters.
withdash : boolean, optional, default: False
Creates a `~matplotlib.text.TextWithDash` instance instead of a
`~matplotlib.text.Text` instance.
Other parameters
----------------
kwargs : `~matplotlib.text.Text` properties.
Other miscellaneous text parameters.
Examples
--------
Individual keyword arguments can be used to override any given
parameter::
>>> text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
>>> text(0.5, 0.5,'matplotlib', horizontalalignment='center',
... verticalalignment='center',
... transform=ax.transAxes)
You can put a rectangular box around the text instance (e.g., to
set a background color) by using the keyword `bbox`. `bbox` is
a dictionary of `~matplotlib.patches.Rectangle`
properties. For example::
>>> text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
"""
default = {
'verticalalignment': 'baseline',
'horizontalalignment': 'left',
'transform': self.transData,
'clip_on': False}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s)
else:
t = mtext.Text(
x=x, y=y, text=s)
self._set_artist_props(t)
t.update(default)
if fontdict is not None:
t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
t.set_clip_path(self.patch)
return t
@docstring.dedent_interpd
def annotate(self, *args, **kwargs):
"""
Create an annotation: a piece of text referring to a data
point.
Parameters
----------
s : string
label
xy : (x, y)
position of element to annotate
xytext : (x, y) , optional, default: None
position of the label `s`
xycoords : string, optional, default: "data"
string that indicates what type of coordinates `xy` is. Examples:
"figure points", "figure pixels", "figure fraction", "axes
points", .... See `matplotlib.text.Annotation` for more details.
textcoords : string, optional
string that indicates what type of coordinates `text` is. Examples:
"figure points", "figure pixels", "figure fraction", "axes
points", .... See `matplotlib.text.Annotation` for more details.
Default is None.
arrowprops : `matplotlib.lines.Line2D` properties, optional
Dictionary of line properties for the arrow that connects
the annotation to the point. If the dictionary has a key
`arrowstyle`, a `~matplotlib.patches.FancyArrowPatch`
instance is created and drawn. See
`matplotlib.text.Annotation` for more details on valid
options. Default is None.
Returns
-------
a : `~matplotlib.text.Annotation`
Notes
-----
%(Annotation)s
Examples
--------
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
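A minimal sketch, assuming an axes ``ax`` with data already plotted::

    ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
                arrowprops=dict(arrowstyle='->'))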
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if 'clip_on' in kwargs:
a.set_clip_path(self.patch)
self.texts.append(a)
a._remove_method = lambda h: self.texts.remove(h)
return a
#### Lines and spans
@docstring.dedent_interpd
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal line across the axis.
Parameters
----------
y : scalar, optional, default: 0
y position in data coordinates of the horizontal line.
xmin : scalar, optional, default: 0
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
xmax : scalar, optional, default: 1
Should be between 0 and 1, 0 being the far left of the plot, 1 the
far right of the plot.
Returns
-------
`~matplotlib.lines.Line2D`
Notes
-----
kwargs are the same as kwargs to plot, and can be
used to control the line properties. e.g.,
Examples
--------
* draw a thick red hline at 'y' = 0 that spans the xrange::
>>> axhline(linewidth=4, color='r')
* draw a default hline at 'y' = 1 that spans the xrange::
>>> axhline(y=1)
* draw a default hline at 'y' = .5 that spans the middle half of
the xrange::
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
See also
--------
axhspan : for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axhline generates its own transform.")
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info(ydata=y, kwargs=kwargs)
yy = self.convert_yunits(y)
scaley = (yy < ymin) or (yy > ymax)
trans = self.get_yaxis_transform(which='grid')
l = mlines.Line2D([xmin, xmax], [y, y], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
@docstring.dedent_interpd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
Add a vertical line across the axes.
Parameters
----------
x : scalar, optional, default: 0
x position in data coordinates of the vertical line.
ymin : scalar, optional, default: 0
Should be between 0 and 1, 0 being the bottom of the plot, 1 the
top of the plot.
ymax : scalar, optional, default: 1
Should be between 0 and 1, 0 being the bottom of the plot, 1 the
top of the plot.
Returns
-------
`~matplotlib.lines.Line2D`
Examples
---------
* draw a thick red vline at *x* = 0 that spans the yrange::
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange::
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange::
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties,
with the exception of 'transform':
%(Line2D)s
See also
--------
axhspan : for example plot and source code
"""
if "transform" in kwargs:
raise ValueError(
"'transform' is not allowed as a kwarg;"
+ "axvline generates its own transform.")
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
self._process_unit_info(xdata=x, kwargs=kwargs)
xx = self.convert_xunits(x)
scalex = (xx < xmin) or (xx > xmax)
trans = self.get_xaxis_transform(which='grid')
l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
@docstring.dedent_interpd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
Add a horizontal span (rectangle) across the axis.
Call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, e.g., with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes::
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = self.get_yaxis_transform(which='grid')
# process the unit information
self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
# first we need to strip away the units
xmin, xmax = self.convert_xunits([xmin, xmax])
ymin, ymax = self.convert_yunits([ymin, ymax])
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scalex=False)
return p
@docstring.dedent_interpd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
Add a vertical span (rectangle) across the axes.
Call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, e.g., with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes::
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`
for example plot and source code
"""
trans = self.get_xaxis_transform(which='grid')
# process the unit information
self._process_unit_info([xmin, xmax], [ymin, ymax], kwargs=kwargs)
# first we need to strip away the units
xmin, xmax = self.convert_xunits([xmin, xmax])
ymin, ymax = self.convert_yunits([ymin, ymax])
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
self.add_patch(p)
self.autoscale_view(scaley=False)
return p
@docstring.dedent
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot horizontal lines at each `y` from `xmin` to `xmax`.
Parameters
----------
y : scalar or sequence of scalar
y-indexes where to plot the lines.
xmin, xmax : scalar or 1D array_like
Respective beginning and end of each line. If scalars are
provided, all lines will have same length.
colors : array_like of colors, optional, default: 'k'
linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
label : string, optional, default: ''
Returns
-------
lines : `~matplotlib.collections.LineCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.LineCollection` properties.
See also
--------
vlines : vertical lines
Examples
--------
.. plot:: mpl_examples/pylab_examples/vline_hline_demo.py
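A minimal sketch, assuming ``fig, ax = plt.subplots()``; three horizontal
segments of different lengths::

    ax.hlines([1, 2, 3], xmin=0, xmax=[1, 2, 3], colors='k',
              linestyles='dashed')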
"""
# We do the conversion first since not all unitized data is uniform
# process the unit information
self._process_unit_info([xmin, xmax], y, kwargs=kwargs)
y = self.convert_yunits(y)
xmin = self.convert_xunits(xmin)
xmax = self.convert_xunits(xmax)
if not iterable(y):
y = [y]
if not iterable(xmin):
xmin = [xmin]
if not iterable(xmax):
xmax = [xmax]
y = np.ravel(y)
xmin = np.resize(xmin, y.shape)
xmax = np.resize(xmax, y.shape)
verts = [((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
if len(y) > 0:
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
Plot vertical lines.
Plot vertical lines at each `x` from `ymin` to `ymax`.
Parameters
----------
x : scalar or 1D array_like
x-indexes where to plot the lines.
ymin, ymax : scalar or 1D array_like
Respective beginning and end of each line. If scalars are
provided, all lines will have same length.
colors : array_like of colors, optional, default: 'k'
linestyles : ['solid' | 'dashed' | 'dashdot' | 'dotted'], optional
label : string, optional, default: ''
Returns
-------
lines : `~matplotlib.collections.LineCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.LineCollection` properties.
See also
--------
hlines : horizontal lines
Examples
---------
.. plot:: mpl_examples/pylab_examples/vline_hline_demo.py
"""
self._process_unit_info(xdata=x, ydata=[ymin, ymax], kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits(x)
ymin = self.convert_yunits(ymin)
ymax = self.convert_yunits(ymax)
if not iterable(x):
x = [x]
if not iterable(ymin):
ymin = [ymin]
if not iterable(ymax):
ymax = [ymax]
x = np.ravel(x)
ymin = np.resize(ymin, x.shape)
ymax = np.resize(ymax, x.shape)
verts = [((thisx, thisymin), (thisx, thisymax))
for thisx, thisymin, thisymax in zip(x, ymin, ymax)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
if len(x) > 0:
minx = min(x)
maxx = max(x)
miny = min(min(ymin), min(ymax))
maxy = max(max(ymin), max(ymax))
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
@docstring.dedent_interpd
def eventplot(self, positions, orientation='horizontal', lineoffsets=1,
linelengths=1, linewidths=None, colors=None,
linestyles='solid', **kwargs):
"""
Plot identical parallel lines at specific positions.
Call signature::
eventplot(positions, orientation='horizontal', lineoffsets=1,
          linelengths=1, linewidths=None, colors=None,
          linestyles='solid')
Plot parallel lines at the given positions. positions should be a 1D
or 2D array-like object, with each row corresponding to a row or column
of lines.
This type of plot is commonly used in neuroscience for representing
neural events, where it is commonly called a spike raster, dot raster,
or raster plot.
However, it is useful in any situation where you wish to show the
timing or position of multiple sets of discrete events, such as the
arrival times of people to a business on each day of the month or the
date of hurricanes each year of the last century.
*orientation* : [ 'horizontal' | 'vertical' ]
'horizontal' : the lines will be vertical and arranged in rows
'vertical' : lines will be horizontal and arranged in columns
*lineoffsets* :
A float or array-like containing floats.
*linelengths* :
A float or array-like containing floats.
*linewidths* :
A float or array-like containing floats.
*colors*
must be a sequence of RGBA tuples (e.g., arbitrary color
strings, etc, not allowed) or a list of such sequences
*linestyles* :
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] or an array of these
values
For linelengths, linewidths, colors, and linestyles, if only a single
value is given, that value is applied to all lines. If an array-like
is given, it must have the same length as positions, and each value
will be applied to the corresponding row or column in positions.
Returns a list of :class:`matplotlib.collections.EventCollection`
objects that were added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
**Example:**
.. plot:: mpl_examples/pylab_examples/eventplot_demo.py
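A minimal sketch, assuming ``fig, ax = plt.subplots()`` and numpy imported
as ``np``; three "event trains" drawn as separate rows::

    events = [np.random.uniform(0, 10, size=20) for _ in range(3)]
    ax.eventplot(events, lineoffsets=[0, 1, 2], linelengths=0.8)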
"""
self._process_unit_info(xdata=positions,
ydata=[lineoffsets, linelengths],
kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
positions = self.convert_xunits(positions)
lineoffsets = self.convert_yunits(lineoffsets)
linelengths = self.convert_yunits(linelengths)
if not iterable(positions):
positions = [positions]
elif any(iterable(position) for position in positions):
positions = [np.asanyarray(position) for position in positions]
else:
positions = [np.asanyarray(positions)]
if len(positions) == 0:
return []
if not iterable(lineoffsets):
lineoffsets = [lineoffsets]
if not iterable(linelengths):
linelengths = [linelengths]
if not iterable(linewidths):
linewidths = [linewidths]
if not iterable(colors):
colors = [colors]
if hasattr(linestyles, 'lower') or not iterable(linestyles):
linestyles = [linestyles]
lineoffsets = np.asarray(lineoffsets)
linelengths = np.asarray(linelengths)
linewidths = np.asarray(linewidths)
if len(lineoffsets) == 0:
lineoffsets = [None]
if len(linelengths) == 0:
linelengths = [None]
if len(linewidths) == 0:
    linewidths = [None]
if len(colors) == 0:
colors = [None]
if len(lineoffsets) == 1 and len(positions) != 1:
lineoffsets = np.tile(lineoffsets, len(positions))
lineoffsets[0] = 0
lineoffsets = np.cumsum(lineoffsets)
if len(linelengths) == 1:
linelengths = np.tile(linelengths, len(positions))
if len(linewidths) == 1:
linewidths = np.tile(linewidths, len(positions))
if len(colors) == 1:
colors = list(colors)
colors = colors * len(positions)
if len(linestyles) == 1:
linestyles = [linestyles] * len(positions)
if len(lineoffsets) != len(positions):
raise ValueError('lineoffsets and positions are unequal sized '
'sequences')
if len(linelengths) != len(positions):
raise ValueError('linelengths and positions are unequal sized '
'sequences')
if len(linewidths) != len(positions):
raise ValueError('linewidths and positions are unequal sized '
'sequences')
if len(colors) != len(positions):
raise ValueError('colors and positions are unequal sized '
'sequences')
if len(linestyles) != len(positions):
raise ValueError('linestyles and positions are unequal sized '
'sequences')
colls = []
for position, lineoffset, linelength, linewidth, color, linestyle in \
zip(positions, lineoffsets, linelengths, linewidths,
colors, linestyles):
coll = mcoll.EventCollection(position,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle)
self.add_collection(coll, autolim=False)
coll.update(kwargs)
colls.append(coll)
if len(positions) > 0:
# try to get min/max
min_max = [(np.min(_p), np.max(_p)) for _p in positions
if len(_p) > 0]
# if we have any non-empty positions, try to autoscale
if len(min_max) > 0:
mins, maxes = zip(*min_max)
minpos = np.min(mins)
maxpos = np.max(maxes)
minline = (lineoffsets - linelengths).min()
maxline = (lineoffsets + linelengths).max()
if colls[0].is_horizontal():
corners = (minpos, minline), (maxpos, maxline)
else:
corners = (minline, minpos), (maxline, maxpos)
self.update_datalim(corners)
self.autoscale_view()
return colls
#### Basic plotting
@docstring.dedent_interpd
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
By default, each line is assigned a different color specified by a
'color cycle'. To change this behavior, you can edit the
axes.color_cycle rcParam.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
``'-'`` solid line style
``'--'`` dashed line style
``'-.'`` dash-dot line style
``':'`` dotted line style
``'.'`` point marker
``','`` pixel marker
``'o'`` circle marker
``'v'`` triangle_down marker
``'^'`` triangle_up marker
``'<'`` triangle_left marker
``'>'`` triangle_right marker
``'1'`` tri_down marker
``'2'`` tri_up marker
``'3'`` tri_left marker
``'4'`` tri_right marker
``'s'`` square marker
``'p'`` pentagon marker
``'*'`` star marker
``'h'`` hexagon1 marker
``'H'`` hexagon2 marker
``'+'`` plus marker
``'x'`` x marker
``'D'`` diamond marker
``'d'`` thin_diamond marker
``'|'`` vline marker
``'_'`` hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12).
See :class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop('scalex', True)
scaley = kwargs.pop('scaley', True)
if not self._hold:
self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
@docstring.dedent_interpd
def plot_date(self, x, y, fmt='o', tz=None, xdate=True, ydate=False,
**kwargs):
"""
Plot with data with dates.
Call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True,
ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ *None* | timezone string | :class:`tzinfo` instance]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ *True* | *False* ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ *False* | *True* ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.dates.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.dates.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.dates.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.dates.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates` for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange` for help on creating the required
floating point dates.
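A minimal sketch, assuming ``fig, ax = plt.subplots()``::

    import datetime
    days = [datetime.date(2014, 1, d) for d in range(1, 8)]
    ax.plot_date(days, [3, 1, 4, 1, 5, 9, 2], fmt='o-')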
"""
if not self._hold:
self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
@docstring.dedent_interpd
def loglog(self, *args, **kwargs):
"""
Make a plot with log scaling on both the *x* and *y* axis.
Call signature::
loglog(*args, **kwargs)
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
Base of the *x*/*y* logarithm
*subsx*/*subsy*: [ *None* | sequence ]
The location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
*nonposx*/*nonposy*: ['mask' | 'clip' ]
Non-positive values in *x* or *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
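A minimal sketch, assuming ``fig, ax = plt.subplots()`` and numpy imported
as ``np``::

    x = np.logspace(0, 3, 50)
    ax.loglog(x, x**2, basex=10, basey=10)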
"""
if not self._hold:
self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogx(self, *args, **kwargs):
"""
Make a plot with log scaling on the *x* axis.
Call signature::
semilogx(*args, **kwargs)
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
Base of the *x* logarithm
*subsx*: [ *None* | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
*nonposx*: [ 'mask' | 'clip' ]
Non-positive values in *x* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold:
self.cla()
d = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
'nonposx': kwargs.pop('nonposx', 'mask'),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def semilogy(self, *args, **kwargs):
"""
Make a plot with log scaling on the *y* axis.
call signature::
semilogy(*args, **kwargs)
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pylab.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ *None* | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
*nonposy*: [ 'mask' | 'clip' ]
Non-positive values in *y* can be masked as
invalid, or clipped to a very small positive number
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`
For example code and figure
"""
if not self._hold:
self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
'nonposy': kwargs.pop('nonposy', 'mask'),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
@docstring.dedent_interpd
def acorr(self, x, **kwargs):
"""
Plot the autocorrelation of `x`.
Parameters
----------
x : sequence of scalar
hold : boolean, optional, default: True
detrend : callable, optional, default: `mlab.detrend_none`
x is detrended by the `detrend` callable. Default is no
detrending.
normed : boolean, optional, default: True
if True, normalize the data by the autocorrelation at the 0-th
lag.
usevlines : boolean, optional, default: True
if True, Axes.vlines is used to plot the vertical lines from the
origin to the acorr. Otherwise, Axes.plot is used.
maxlags : integer, optional, default: 10
number of lags to show. If None, will return all 2 * len(x) - 1
lags.
Returns
-------
(lags, c, line, b) : where:
- `lags` is a length 2*maxlags+1 lag vector.
- `c` is the 2*maxlags+1 auto correlation vector.
- `line` is a `~matplotlib.lines.Line2D` instance returned by
`plot`.
- `b` is the horizontal line at y=0 (from `axhline`), or None if
  `usevlines` is False.
Other parameters
-----------------
linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
Only used if usevlines is False.
marker : string, optional, default: 'o'
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
`mode` = 2.
Examples
--------
`~matplotlib.pyplot.xcorr` is top graph, and
`~matplotlib.pyplot.acorr` is bottom graph.
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
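A minimal sketch, assuming ``fig, ax = plt.subplots()`` and numpy imported
as ``np``::

    x = np.random.randn(500)
    lags, c, line, b = ax.acorr(x, maxlags=20)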
"""
return self.xcorr(x, x, **kwargs)
@docstring.dedent_interpd
def xcorr(self, x, y, normed=True, detrend=mlab.detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
Plot the cross correlation between *x* and *y*.
Parameters
----------
x : sequence of scalars of length n
y : sequence of scalars of length n
hold : boolean, optional, default: True
detrend : callable, optional, default: `mlab.detrend_none`
x is detrended by the `detrend` callable. Default is no
detrending.
normed : boolean, optional, default: True
if True, normalize the data by the autocorrelation at the 0-th
lag.
usevlines : boolean, optional, default: True
if True, Axes.vlines is used to plot the vertical lines from the
origin to the xcorr. Otherwise, Axes.plot is used.
maxlags : integer, optional, default: 10
number of lags to show. If None, will return all 2 * len(x) - 1
lags.
Returns
-------
(lags, c, line, b) : where:
- `lags` is a length 2*maxlags+1 lag vector.
- `c` is the 2*maxlags+1 cross correlation vector.
- `line` is a `~matplotlib.lines.Line2D` instance returned by
`plot`.
- `b` is the horizontal line at y=0 (from `axhline`), or None if
  `usevlines` is False.
Other parameters
-----------------
linestyle : `~matplotlib.lines.Line2D` prop, optional, default: None
Only used if usevlines is False.
marker : string, optional, default: 'o'
Notes
-----
The cross correlation is performed with :func:`numpy.correlate` with
`mode` = 2.
"""
Nx = len(x)
if Nx != len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed:
c /= np.sqrt(np.dot(x, x) * np.dot(y, y))
if maxlags is None:
maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d' % Nx)
lags = np.arange(-maxlags, maxlags + 1)
c = c[Nx - 1 - maxlags:Nx + maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
"""
Make a step plot.
Call signature::
step(x, y, *args, **kwargs)
Additional keyword args to :func:`step` are the same as those
for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i+1]
If 'post', that interval has level y[i]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
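A minimal sketch, assuming ``fig, ax = plt.subplots()``::

    ax.step([1, 2, 3, 4], [1, 3, 2, 4], where='post')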
"""
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
usr_linestyle = kwargs.pop('linestyle', '')
kwargs['linestyle'] = 'steps-' + where + usr_linestyle
return self.plot(x, y, *args, **kwargs)
@docstring.dedent_interpd
def bar(self, left, height, width=0.8, bottom=None, **kwargs):
"""
Make a bar plot.
Make a bar plot with rectangles bounded by:
`left`, `left` + `width`, `bottom`, `bottom` + `height`
(left, right, bottom and top edges)
Parameters
----------
left : sequence of scalars
the x coordinates of the left sides of the bars
height : sequence of scalars
the heights of the bars
width : scalar or array-like, optional, default: 0.8
the width(s) of the bars
bottom : scalar or array-like, optional, default: None
the y coordinate(s) of the bars
color : scalar or array-like, optional
the colors of the bar faces
edgecolor : scalar or array-like, optional
the colors of the bar edges
linewidth : scalar or array-like, optional, default: None
width of bar edge(s). If None, use default
linewidth; If 0, don't draw edges.
xerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
yerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
ecolor : scalar or array-like, optional, default: None
specifies the color of errorbar(s)
capsize : integer, optional, default: 3
determines the length in points of the error bar caps
error_kw :
dictionary of kwargs to be passed to errorbar method. *ecolor* and
*capsize* may be specified here rather than as independent kwargs.
align : ['edge' | 'center'], optional, default: 'edge'
If `edge`, aligns bars by their left edges (for vertical bars) and
by their bottom edges (for horizontal bars). If `center`, interpret
the `left` argument as the coordinates of the centers of the bars.
orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
The orientation of the bars.
log : boolean, optional, default: False
If true, sets the axis to be log scale
Returns
-------
`matplotlib.patches.Rectangle` instances.
Notes
-----
The optional arguments `color`, `edgecolor`, `linewidth`,
`xerr`, and `yerr` can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: `xerr` and `yerr` are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
See also
--------
barh: Plot a horizontal bar plot.
Examples
--------
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
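A minimal sketch, assuming ``fig, ax = plt.subplots()``; bars centered on
integer x positions, with symmetric error bars::

    heights = [3, 5, 2]
    ax.bar([0, 1, 2], heights, width=0.6, align='center',
           yerr=[0.5, 1.0, 0.3], ecolor='k')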
"""
if not self._hold:
self.cla()
color = kwargs.pop('color', None)
edgecolor = kwargs.pop('edgecolor', None)
linewidth = kwargs.pop('linewidth', None)
# Because xerr and yerr will be passed to errorbar,
# most dimension checking and processing will be left
# to the errorbar method.
xerr = kwargs.pop('xerr', None)
yerr = kwargs.pop('yerr', None)
error_kw = kwargs.pop('error_kw', dict())
ecolor = kwargs.pop('ecolor', None)
capsize = kwargs.pop('capsize', 3)
error_kw.setdefault('ecolor', ecolor)
error_kw.setdefault('capsize', capsize)
align = kwargs.pop('align', 'edge')
orientation = kwargs.pop('orientation', 'vertical')
log = kwargs.pop('log', False)
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log', nonposy='clip')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
adjust_ylim = True
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log', nonposx='clip')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
adjust_xlim = True
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) == 0: # until to_rgba_array is changed
color = [[0, 0, 0, 0]]
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) == 0: # until to_rgba_array is changed
edgecolor = [[0, 0, 0, 0]]
if len(edgecolor) < nbars:
edgecolor *= nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left) == nbars, ("incompatible sizes: argument 'left' must "
"be length %d or scalar" % nbars)
assert len(height) == nbars, ("incompatible sizes: argument 'height' "
"must be length %d or scalar" %
nbars)
assert len(width) == nbars, ("incompatible sizes: argument 'width' "
"must be length %d or scalar" %
nbars)
assert len(bottom) == nbars, ("incompatible sizes: argument 'bottom' "
"must be length %d or scalar" %
nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
left = self.convert_xunits(left)
width = self.convert_xunits(width)
if xerr is not None:
xerr = self.convert_xunits(xerr)
if self.yaxis is not None:
bottom = self.convert_yunits(bottom)
height = self.convert_yunits(height)
if yerr is not None:
yerr = self.convert_yunits(yerr)
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i] / 2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i] / 2.
for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h < 0:
b += h
h = abs(h)
if w < 0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label='_nolegend_'
)
r.update(kwargs)
r.get_path()._interpolation_steps = 100
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l + 0.5 * w for l, w in zip(left, width)]
y = [b + h for b, h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l + w for l, w in zip(left, width)]
y = [b + 0.5 * h for b, h in zip(bottom, height)]
if "label" not in error_kw:
error_kw["label"] = '_nolegend_'
errorbar = self.errorbar(x, y,
yerr=yerr, xerr=xerr,
fmt='none', **error_kw)
else:
errorbar = None
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin([w for w in width if w > 0])
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin * 0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin([h for h in height if h > 0])
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin * 0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
bar_container = BarContainer(patches, errorbar, label=label)
self.add_container(bar_container)
return bar_container
@docstring.dedent_interpd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
Make a horizontal bar plot.
Make a horizontal bar plot with rectangles bounded by:
`left`, `left` + `width`, `bottom`, `bottom` + `height`
(left, right, bottom and top edges)
`bottom`, `width`, `height`, and `left` can be either scalars
or sequences
Parameters
----------
bottom : scalar or array-like
the y coordinate(s) of the bars
width : scalar or array-like
the width(s) of the bars
height : sequence of scalars, optional, default: 0.8
the heights of the bars
left : sequence of scalars
the x coordinates of the left sides of the bars
Returns
--------
`matplotlib.patches.Rectangle` instances.
Other parameters
----------------
color : scalar or array-like, optional
the colors of the bars
edgecolor : scalar or array-like, optional
the colors of the bar edges
linewidth : scalar or array-like, optional, default: None
width of bar edge(s). If None, use default
linewidth; If 0, don't draw edges.
xerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
yerr : scalar or array-like, optional, default: None
if not None, will be used to generate errorbar(s) on the bar chart
ecolor : scalar or array-like, optional, default: None
specifies the color of errorbar(s)
capsize : integer, optional, default: 3
determines the length in points of the error bar caps
error_kw :
dictionary of kwargs to be passed to errorbar method. `ecolor` and
`capsize` may be specified here rather than as independent kwargs.
align : ['edge' | 'center'], optional, default: 'edge'
If `edge`, aligns bars by their left edges (for vertical bars) and
by their bottom edges (for horizontal bars). If `center`, interpret
the `left` argument as the coordinates of the centers of the bars.
orientation : 'vertical' | 'horizontal', optional, default: 'vertical'
The orientation of the bars.
log : boolean, optional, default: False
If true, sets the axis to be log scale
Notes
-----
The optional arguments `color`, `edgecolor`, `linewidth`,
`xerr`, and `yerr` can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Detail: `xerr` and `yerr` are passed directly to
:meth:`errorbar`, so they can also have shape 2xN for
independent specification of lower and upper errors.
Other optional kwargs:
%(Rectangle)s
See also
--------
bar: Plot a vertical bar plot.
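A minimal, self-contained sketch (the widths and labels below are
invented purely for illustration) of a horizontal bar plot with
category labels centered on the bars::

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    y = np.arange(3)
    ax.barh(y, [4, 7, 2], height=0.5, color='g')
    # place the tick labels at the vertical centers of the bars
    ax.set_yticks(y + 0.25)
    ax.set_yticklabels(['a', 'b', 'c'])
    plt.show()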
"""
patches = self.bar(left=left, height=height, width=width,
bottom=bottom, orientation='horizontal', **kwargs)
return patches
@docstring.dedent_interpd
def broken_barh(self, xranges, yrange, **kwargs):
"""
Plot horizontal bars.
Call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, i.e.,::
facecolors = 'black'
or a sequence of arguments for the various bars, i.e.,::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
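A minimal, self-contained sketch (the ranges below are invented
purely for illustration) of two rows of horizontal spans::

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # each tuple is (xmin, xwidth); the second argument is (ymin, ywidth)
    ax.broken_barh([(10, 5), (20, 3)], (0, 2), facecolors='blue')
    ax.broken_barh([(12, 4), (26, 6)], (3, 2),
                   facecolors=('red', 'green'))
    ax.set_xlim(0, 40)
    plt.show()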
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
def stem(self, *args, **kwargs):
"""
Create a stem plot.
Call signatures::
stem(y, linefmt='b-', markerfmt='bo', basefmt='r-')
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
If no *x* values are provided, the default is (0, 1, ..., len(y) - 1)
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
This
`document <http://www.mathworks.com/help/techdoc/ref/stem.html>`_
for details.
**Example:**
.. plot:: mpl_examples/pylab_examples/stem_plot.py
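A minimal, self-contained sketch (the sampled sine values are used
purely for illustration)::

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.linspace(0, 2 * np.pi, 20)
    fig, ax = plt.subplots()
    # vertical stems from the baseline at 0 up to sin(x)
    ax.stem(x, np.sin(x), linefmt='b-', markerfmt='bo', basefmt='r-')
    plt.show()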
"""
remember_hold = self._hold
if not self._hold:
self.cla()
self.hold(True)
# Assume there's at least one data array
y = np.asarray(args[0])
args = args[1:]
# Try a second one
try:
second = np.asarray(args[0], dtype=np.float)
x, y = y, second
args = args[1:]
except (IndexError, ValueError):
# The second array doesn't make sense, or it doesn't exist
second = np.arange(len(y))
x = second
# Popping some defaults
try:
linefmt = kwargs.pop('linefmt', args[0])
except IndexError:
linefmt = kwargs.pop('linefmt', 'b-')
try:
markerfmt = kwargs.pop('markerfmt', args[1])
except IndexError:
markerfmt = kwargs.pop('markerfmt', 'bo')
try:
basefmt = kwargs.pop('basefmt', args[2])
except IndexError:
basefmt = kwargs.pop('basefmt', 'r-')
bottom = kwargs.pop('bottom', None)
label = kwargs.pop('label', None)
markerline, = self.plot(x, y, markerfmt, label="_nolegend_")
if bottom is None:
bottom = 0
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx, thisx], [bottom, thisy], linefmt,
label="_nolegend_")
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [bottom, bottom],
basefmt, label="_nolegend_")
self.hold(remember_hold)
stem_container = StemContainer((markerline, stemlines, baseline),
label=label)
self.add_container(stem_container)
return stem_container
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1,
startangle=None, radius=None, counterclock=True,
wedgeprops=None, textprops=None):
r"""
Plot a pie chart.
Call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1, startangle=None, radius=None,
counterclock=True, wedgeprops=None, textprops=None)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized. The wedges are plotted counterclockwise,
by default starting from the x-axis.
Keyword arguments:
*explode*: [ *None* | len(x) sequence ]
If not *None*, is a ``len(x)`` array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ *None* | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ *None* | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ *None* | format string | format function ]
If not *None*, is a string or function used to label the wedges
with their numeric value. The label will be placed inside the
wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ *False* | *True* ]
Draw a shadow beneath the pie.
*startangle*: [ *None* | Offset angle ]
If not *None*, rotates the start of the pie chart by *angle*
degrees counterclockwise from the x-axis.
*radius*: [ *None* | scalar ]
The radius of the pie, if *radius* is *None* it will be set to 1.
*counterclock*: [ *False* | *True* ]
Specify fractions direction, clockwise or counterclockwise.
*wedgeprops*: [ *None* | dict of key value pairs ]
Dict of arguments passed to the wedge objects making the pie.
For example, you can pass in wedgeprops = { 'linewidth' : 3 }
to set the width of the wedge border lines equal to 3.
For more details, look at the doc/arguments of the wedge object.
By default `clip_on=False`.
*textprops*: [ *None* | dict of key value pairs ]
Dict of arguments to pass to the text objects.
The pie chart will probably look best if the figure and axes are
square, or the Axes aspect is equal. e.g.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
or::
axes(aspect=1)
Return value:
If *autopct* is *None*, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
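A minimal, self-contained sketch (the fractions and labels below are
invented purely for illustration) of a labelled pie with one exploded
wedge and percentage annotations::

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.set_aspect('equal')      # keep the pie circular
    ax.pie([30, 20, 50], labels=['a', 'b', 'c'], explode=(0, 0.1, 0),
           autopct='%1.1f%%', shadow=True)
    plt.show()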
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx > 1:
x = np.divide(x, sx)
if labels is None:
labels = [''] * len(x)
if explode is None:
explode = [0] * len(x)
assert(len(x) == len(labels))
assert(len(x) == len(explode))
if colors is None:
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0, 0
if radius is None:
radius = 1
# Starting theta1 is the start fraction of the circle
if startangle is None:
theta1 = 0
else:
theta1 = startangle / 360.0
# set default values in wedge_prop
if wedgeprops is None:
wedgeprops = {}
if 'clip_on' not in wedgeprops:
wedgeprops['clip_on'] = False
if textprops is None:
textprops = {}
if 'clip_on' not in textprops:
textprops['clip_on'] = False
texts = []
slices = []
autotexts = []
i = 0
for frac, label, expl in cbook.safezip(x, labels, explode):
x, y = center
theta2 = (theta1 + frac) if counterclock else (theta1 - frac)
thetam = 2 * math.pi * 0.5 * (theta1 + theta2)
x += expl * math.cos(thetam)
y += expl * math.sin(thetam)
w = mpatches.Wedge((x, y), radius, 360. * min(theta1, theta2),
360. * max(theta1, theta2),
facecolor=colors[i % len(colors)],
**wedgeprops)
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02)
shad.set_zorder(0.9 * w.get_zorder())
shad.set_label('_nolegend_')
self.add_patch(shad)
xt = x + labeldistance * radius * math.cos(thetam)
yt = y + labeldistance * radius * math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center',
**textprops)
texts.append(t)
if autopct is not None:
xt = x + pctdistance * radius * math.cos(thetam)
yt = y + pctdistance * radius * math.sin(thetam)
if is_string_like(autopct):
s = autopct % (100. * frac)
elif six.callable(autopct):
s = autopct(100. * frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center',
**textprops)
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None:
return slices, texts
else:
return slices, texts, autotexts
@docstring.dedent_interpd
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
**kwargs):
"""
Plot an errorbar graph.
Call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1,
capthick=None)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, or 2xN array-like ]
If a scalar number, len(N) array-like object, or an Nx1
array-like object, errorbars are drawn at +/-value relative
to the data.
If a sequence of shape 2xN, errorbars are drawn at -row1
and +row2 relative to the data.
*fmt*: [ '' | 'none' | plot format string ]
The plot format symbol. If *fmt* is 'none' (case-insensitive),
only the errorbars are plotted. This is used for adding
errorbars to a bar plot, for example. Default is '',
an empty plot format string; properties are
then identical to the defaults for :meth:`plot`.
*ecolor*: [ *None* | mpl color ]
A matplotlib color arg which gives the color the errorbar lines;
if *None*, use the color of the line connecting the markers.
*elinewidth*: scalar
The linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
The length of the error bar caps in points
*capthick*: scalar
An alias kwarg to *markeredgewidth* (a.k.a. - *mew*). This
setting is a more sensible name for the property that
controls the thickness of the error bar cap in points. For
backwards compatibility, if *mew* or *markeredgewidth* are given,
then they will over-ride *capthick*. This may change in future
releases.
*barsabove*: [ *True* | *False* ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims* / *uplims* / *xlolims* / *xuplims*: [ *False* | *True* ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*. To use limits with inverted
axes, :meth:`set_xlim` or :meth:`set_ylim` must be called
before :meth:`errorbar`.
*errorevery*: positive integer
subsamples the errorbars. e.g., if errorevery=5, errorbars for
every 5-th datapoint will be plotted. The data plot itself still
shows all data points.
All other keyword arguments are passed on to the plot command for the
markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Returns (*plotline*, *caplines*, *barlinecols*):
*plotline*: :class:`~matplotlib.lines.Line2D` instance
*x*, *y* plot markers and/or line
*caplines*: list of error bar cap
:class:`~matplotlib.lines.Line2D` instances
*barlinecols*: list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/statistics/errorbar_demo.py
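A minimal, self-contained sketch (the data and error values below are
invented purely for illustration) using asymmetric vertical errors in
the 2xN form together with a scalar horizontal error::

    import numpy as np
    import matplotlib.pyplot as plt

    x = np.arange(5)
    y = x ** 2
    # first row: lower errors, second row: upper errors
    yerr = [[0.5, 1.0, 0.5, 1.5, 0.5],
            [1.0, 0.5, 1.5, 0.5, 1.0]]
    fig, ax = plt.subplots()
    ax.errorbar(x, y, yerr=yerr, xerr=0.2, fmt='o', ecolor='g',
                capthick=2)
    plt.show()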
"""
if errorevery < 1:
raise ValueError(
'errorevery has to be a strictly positive integer')
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold:
self.cla()
holdstate = self._hold
self._hold = True
if fmt is None:
fmt = 'none'
msg = ('Use of None object as fmt keyword argument to '
+ 'suppress plotting of data values is deprecated '
+ 'since 1.4; use the string "none" instead.')
warnings.warn(msg, mplDeprecation, stacklevel=1)
plot_line = (fmt.lower() != 'none')
label = kwargs.pop("label", None)
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr] * len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr] * len(y)
l0 = None
# Instead of using zorder, the line plot is being added
# either here, or after all the errorbar plot elements.
if barsabove and plot_line:
l0, = self.plot(x, y, fmt, label="_nolegend_", **kwargs)
barcols = []
caplines = []
lines_kw = {'label': '_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
for key in ('linewidth', 'lw'):
if key in kwargs:
lines_kw[key] = kwargs[key]
for key in ('transform', 'alpha', 'zorder'):
if key in kwargs:
lines_kw[key] = kwargs[key]
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims] * len(x), bool)
else:
lolims = np.asarray(lolims, bool)
if not iterable(uplims):
uplims = np.array([uplims] * len(x), bool)
else:
uplims = np.asarray(uplims, bool)
if not iterable(xlolims):
xlolims = np.array([xlolims] * len(x), bool)
else:
xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims):
xuplims = np.array([xuplims] * len(x), bool)
else:
xuplims = np.asarray(xuplims, bool)
everymask = np.arange(len(x)) % errorevery == 0
def xywhere(xs, ys, mask):
"""
return xs[i], ys[i] for each index i where mask[i] is True; works
even when xs and ys are plain sequences rather than arrays
"""
assert len(xs) == len(ys)
assert len(xs) == len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
plot_kw = {'label': '_nolegend_'}
if capsize > 0:
plot_kw['ms'] = 2. * capsize
if capthick is not None:
# 'mew' has higher priority, I believe,
# if both 'mew' and 'markeredgewidth' exists.
# So, save capthick to markeredgewidth so that
# explicitly setting mew or markeredgewidth will
# over-write capthick.
plot_kw['markeredgewidth'] = capthick
# For backwards-compat, allow explicit setting of
# 'mew' or 'markeredgewidth' to over-ride capthick.
for key in ('markeredgewidth', 'mew', 'transform', 'alpha', 'zorder'):
if key in kwargs:
plot_kw[key] = kwargs[key]
if xerr is not None:
if (iterable(xerr) and len(xerr) == 2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx - thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr[0])]
right = [thisx + thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx - thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr)]
right = [thisx + thiserr for (thisx, thiserr)
in cbook.safezip(x, xerr)]
# select points without upper/lower limits in x and
# draw normal errorbars for these points
noxlims = ~(xlolims | xuplims)
if noxlims.any():
yo, _ = xywhere(y, right, noxlims & everymask)
lo, ro = xywhere(left, right, noxlims & everymask)
barcols.append(self.hlines(yo, lo, ro, **lines_kw))
if capsize > 0:
caplines.extend(self.plot(lo, yo, 'k|', **plot_kw))
caplines.extend(self.plot(ro, yo, 'k|', **plot_kw))
if xlolims.any():
yo, _ = xywhere(y, right, xlolims & everymask)
lo, ro = xywhere(x, right, xlolims & everymask)
barcols.append(self.hlines(yo, lo, ro, **lines_kw))
rightup, yup = xywhere(right, y, xlolims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETLEFT
else:
marker = mlines.CARETRIGHT
caplines.extend(
self.plot(rightup, yup, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xlo, ylo = xywhere(x, y, xlolims & everymask)
caplines.extend(self.plot(xlo, ylo, 'k|', **plot_kw))
if xuplims.any():
yo, _ = xywhere(y, right, xuplims & everymask)
lo, ro = xywhere(left, x, xuplims & everymask)
barcols.append(self.hlines(yo, lo, ro, **lines_kw))
leftlo, ylo = xywhere(left, y, xuplims & everymask)
if self.xaxis_inverted():
marker = mlines.CARETRIGHT
else:
marker = mlines.CARETLEFT
caplines.extend(
self.plot(leftlo, ylo, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xup, yup = xywhere(x, y, xuplims & everymask)
caplines.extend(self.plot(xup, yup, 'k|', **plot_kw))
if yerr is not None:
if (iterable(yerr) and len(yerr) == 2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy - thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr[0])]
upper = [thisy + thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy - thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr)]
upper = [thisy + thiserr for (thisy, thiserr)
in cbook.safezip(y, yerr)]
# select points without upper/lower limits in y and
# draw normal errorbars for these points
noylims = ~(lolims | uplims)
if noylims.any():
xo, _ = xywhere(x, lower, noylims & everymask)
lo, uo = xywhere(lower, upper, noylims & everymask)
barcols.append(self.vlines(xo, lo, uo, **lines_kw))
if capsize > 0:
caplines.extend(self.plot(xo, lo, 'k_', **plot_kw))
caplines.extend(self.plot(xo, uo, 'k_', **plot_kw))
if lolims.any():
xo, _ = xywhere(x, lower, lolims & everymask)
lo, uo = xywhere(y, upper, lolims & everymask)
barcols.append(self.vlines(xo, lo, uo, **lines_kw))
xup, upperup = xywhere(x, upper, lolims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETDOWN
else:
marker = mlines.CARETUP
caplines.extend(
self.plot(xup, upperup, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xlo, ylo = xywhere(x, y, lolims & everymask)
caplines.extend(self.plot(xlo, ylo, 'k_', **plot_kw))
if uplims.any():
xo, _ = xywhere(x, lower, uplims & everymask)
lo, uo = xywhere(lower, y, uplims & everymask)
barcols.append(self.vlines(xo, lo, uo, **lines_kw))
xlo, lowerlo = xywhere(x, lower, uplims & everymask)
if self.yaxis_inverted():
marker = mlines.CARETUP
else:
marker = mlines.CARETDOWN
caplines.extend(
self.plot(xlo, lowerlo, ls='None', marker=marker,
**plot_kw))
if capsize > 0:
xup, yup = xywhere(x, y, uplims & everymask)
caplines.extend(self.plot(xup, yup, 'k_', **plot_kw))
if not barsabove and plot_line:
l0, = self.plot(x, y, fmt, **kwargs)
if ecolor is None:
if l0 is None:
ecolor = six.next(self._get_lines.color_cycle)
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
self._hold = holdstate
errorbar_container = ErrorbarContainer((l0, tuple(caplines),
tuple(barcols)),
has_xerr=(xerr is not None),
has_yerr=(yerr is not None),
label=label)
self.containers.append(errorbar_container)
return errorbar_container # (l0, caplines, barcols)
def boxplot(self, x, notch=False, sym=None, vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None,
capprops=None, whiskerprops=None, manage_xticks=True):
"""
Make a box and whisker plot.
Call signature::
boxplot(self, x, notch=False, sym='b+', vert=True, whis=1.5,
positions=None, widths=None, patch_artist=False,
bootstrap=None, usermedians=None, conf_intervals=None,
meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None,
capprops=None, whiskerprops=None, manage_xticks=True):
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Parameters
----------
x : Array or a sequence of vectors.
The input data.
notch : bool, default = False
If False, produces a rectangular box plot.
If True, will produce a notched box plot
sym : str or None, default = None
The default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
If `None`, then the fliers default to 'b+'. If you want more
control, use the flierprops kwarg.
vert : bool, default = True
If True (default), makes the boxes vertical.
If False, makes horizontal boxes.
whis : float, sequence (default = 1.5) or string
As a float, determines the reach of the whiskers past the first
and third quartiles (e.g., Q3 + whis*IQR, IQR = interquartile
range, Q3-Q1). Beyond the whiskers, data are considered outliers
and are plotted as individual points. Set this to an unreasonably
high value to force the whiskers to show the min and max values.
Alternatively, set this to an ascending sequence of percentile
(e.g., [5, 95]) to set the whiskers at specific percentiles of
the data. Finally, *whis* can be the string 'range' to force the
whiskers to the min and max of the data. In the edge case that
the 25th and 75th percentiles are equivalent, *whis* will be
automatically set to 'range'.
bootstrap : None (default) or integer
Specifies whether to bootstrap the confidence intervals
around the median for notched boxplots. If bootstrap==None,
no bootstrapping is performed, and notches are calculated
using a Gaussian-based asymptotic approximation (see McGill, R.,
Tukey, J.W., and Larsen, W.A., 1978, and Kendall and Stuart,
1967). Otherwise, bootstrap specifies the number of times to
bootstrap the median to determine its 95% confidence intervals.
Values between 1000 and 10000 are recommended.
usermedians : array-like or None (default)
An array or sequence whose first dimension (or length) is
compatible with *x*. This overrides the medians computed by
matplotlib for each element of *usermedians* that is not None.
When an element of *usermedians* == None, the median will be
computed by matplotlib as normal.
conf_intervals : array-like or None (default)
Array or sequence whose first dimension (or length) is compatible
with *x* and whose second dimension is 2. When the current element
of *conf_intervals* is not None, the notch locations computed by
matplotlib are overridden (assuming notch is True). When an
element of *conf_intervals* is None, boxplot computes the notches
using the method specified by the other kwargs (e.g., *bootstrap*).
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : array-like, default = 0.5
Either a scalar or a vector and sets the width of each box. The
default is 0.5, or ``0.15*(distance between extreme positions)``
if that is smaller.
labels : sequence or None (default)
Labels for each dataset. Length must be compatible with
dimensions of *x*
patch_artist : bool, default = False
If False produces boxes with the Line2D artist
If True produces boxes with the Patch artist
showmeans : bool, default = False
If True, will toggle on the rendering of the means
showcaps : bool, default = True
If True, will toggle on the rendering of the caps
showbox : bool, default = True
If True, will toggle on the rendering of the box
showfliers : bool, default = True
If True, will toggle on the rendering of the fliers
boxprops : dict or None (default)
If provided, will set the plotting style of the boxes
whiskerprops : dict or None (default)
If provided, will set the plotting style of the whiskers
capprops : dict or None (default)
If provided, will set the plotting style of the caps
flierprops : dict or None (default)
If provided, will set the plotting style of the fliers
medianprops : dict or None (default)
If provided, will set the plotting style of the medians
meanprops : dict or None (default)
If provided, will set the plotting style of the means
meanline : bool, default = False
If True (and *showmeans* is True), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created. That dictionary has the following keys
(assuming vertical boxplots):
- boxes: the main body of the boxplot showing the quartiles
and the median's confidence intervals if enabled.
- medians: horizontal lines at the median of each box.
- whiskers: the vertical lines extending to the most extreme,
n-outlier data points.
- caps: the horizontal lines at the ends of the whiskers.
- fliers: points representing data that extend beyond the
whiskers (outliers).
- means: points or lines representing the means.
Examples
--------
.. plot:: mpl_examples/statistics/boxplot_demo.py
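A minimal, self-contained sketch (the random samples below are
generated purely for illustration) of notched boxplots with labels
and mean markers::

    import numpy as np
    import matplotlib.pyplot as plt

    np.random.seed(0)
    data = [np.random.normal(0, s, 100) for s in (1, 2, 3)]
    fig, ax = plt.subplots()
    ax.boxplot(data, notch=True, showmeans=True,
               labels=['s=1', 's=2', 's=3'])
    plt.show()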
"""
bxpstats = cbook.boxplot_stats(x, whis=whis, bootstrap=bootstrap,
labels=labels)
# make sure we have a dictionary
if flierprops is None:
flierprops = dict()
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False' but nonetheless deal with it to keep
# backward compatibility
if sym == '':
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle='none', marker='',
color='none')
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops['marker'] = marker
# if we have a color, use it
if color is not None:
# assume that if color is passed in, the user wants a
# filled symbol; if the user wants more control, use
# flierprops
flierprops['color'] = color
# replace medians if necessary:
if usermedians is not None:
if (len(np.ravel(usermedians)) != len(bxpstats) or
np.shape(usermedians)[0] != len(bxpstats)):
medmsg = 'usermedians length not compatible with x'
raise ValueError(medmsg)
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats['med'] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
raise ValueError('conf_intervals length not '
'compatible with x')
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError('each confidence interval must '
'have two values')
else:
if ci[0] is not None:
stats['cilo'] = ci[0]
if ci[1] is not None:
stats['cihi'] = ci[1]
artists = self.bxp(bxpstats, positions=positions, widths=widths,
vert=vert, patch_artist=patch_artist,
shownotches=notch, showmeans=showmeans,
showcaps=showcaps, showbox=showbox,
boxprops=boxprops, flierprops=flierprops,
medianprops=medianprops, meanprops=meanprops,
meanline=meanline, showfliers=showfliers,
capprops=capprops, whiskerprops=whiskerprops,
manage_xticks=manage_xticks)
return artists
def bxp(self, bxpstats, positions=None, widths=None, vert=True,
patch_artist=False, shownotches=False, showmeans=False,
showcaps=True, showbox=True, showfliers=True,
boxprops=None, whiskerprops=None, flierprops=None,
medianprops=None, capprops=None, meanprops=None,
meanline=False, manage_xticks=True):
"""
Drawing function for box and whisker plots.
Call signature::
bxp(self, bxpstats, positions=None, widths=None, vert=True,
patch_artist=False, shownotches=False, showmeans=False,
showcaps=True, showbox=True, showfliers=True,
boxprops=None, whiskerprops=None, flierprops=None,
medianprops=None, capprops=None, meanprops=None,
meanline=False, manage_xticks=True):
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
Parameters
----------
bxpstats : list of dicts
A list of dictionaries containing stats for each boxplot.
Required keys are:
- ``med``: The median (scalar float).
- ``q1``: The first quartile (25th percentile) (scalar
float).
- ``q3``: The third quartile (75th percentile) (scalar
float).
- ``whislo``: Lower bound of the lower whisker (scalar
float).
- ``whishi``: Upper bound of the upper whisker (scalar
float).
Optional keys are:
- ``mean``: The mean (scalar float). Needed if
``showmeans=True``.
- ``fliers``: Data beyond the whiskers (sequence of floats).
Needed if ``showfliers=True``.
- ``cilo`` & ``cihi``: Lower and upper confidence intervals
about the median. Needed if ``shownotches=True``.
- ``label``: Name of the dataset (string). If available,
this will be used as a tick label for the boxplot
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the boxes. The ticks and limits
are automatically set to match the positions.
widths : array-like, default = 0.5
Either a scalar or a vector and sets the width of each
box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
vert : bool, default = True
If `True` (default), makes the boxes vertical. If `False`,
makes horizontal boxes.
patch_artist : bool, default = False
If `False` produces boxes with the
`~matplotlib.lines.Line2D` artist. If `True` produces boxes
with the `~matplotlib.patches.Patch` artist.
shownotches : bool, default = False
If `False` (default), produces a rectangular box plot.
If `True`, will produce a notched box plot
showmeans : bool, default = False
If `True`, will toggle on the rendering of the means
showcaps : bool, default = True
If `True`, will toggle on the rendering of the caps
showbox : bool, default = True
If `True`, will toggle on the rendering of the box
showfliers : bool, default = True
If `True`, will toggle on the rendering of the fliers
boxprops : dict or None (default)
If provided, will set the plotting style of the boxes
whiskerprops : dict or None (default)
If provided, will set the plotting style of the whiskers
capprops : dict or None (default)
If provided, will set the plotting style of the caps
flierprops : dict or None (default)
If provided will set the plotting style of the fliers
medianprops : dict or None (default)
If provided, will set the plotting style of the medians
meanprops : dict or None (default)
If provided, will set the plotting style of the means
meanline : bool, default = False
If `True` (and *showmeans* is `True`), will try to render the mean
as a line spanning the full width of the box according to
*meanprops*. Not recommended if *shownotches* is also True.
Otherwise, means will be shown as points.
manage_xticks : bool, default = True
If True, the function will adjust the xlim and xtick locations
to match the box positions.
Returns
-------
result : dict
A dictionary mapping each component of the boxplot to a list
of the :class:`matplotlib.lines.Line2D` instances
created. That dictionary has the following keys (assuming
vertical boxplots):
- ``boxes``: the main body of the boxplot showing the
quartiles and the median's confidence intervals if
enabled.
- ``medians``: horizontal lines at the median of each box.
- ``whiskers``: the vertical lines extending to the most
extreme, n-outlier data points.
- ``caps``: the horizontal lines at the ends of the
whiskers.
- ``fliers``: points representing data that extend beyond
the whiskers (fliers).
- ``means``: points or lines representing the means.
Examples
--------
.. plot:: mpl_examples/statistics/bxp_demo.py
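A minimal, self-contained sketch (the random samples below are
generated purely for illustration): the statistics are precomputed
with :func:`~matplotlib.cbook.boxplot_stats` and then drawn with
`bxp`::

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cbook

    np.random.seed(0)
    # one dict of stats per column of the input array
    stats = cbook.boxplot_stats(np.random.randn(100, 3),
                                labels=['a', 'b', 'c'])
    fig, ax = plt.subplots()
    ax.bxp(stats, showmeans=True)
    plt.show()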
"""
# lists of artists to be output
whiskers = []
caps = []
boxes = []
medians = []
means = []
fliers = []
# empty list of xticklabels
datalabels = []
# translates between line2D and patch linestyles
linestyle_map = {
'solid': '-',
'dashed': '--',
'dashdot': '-.',
'dotted': ':'
}
# box properties
if patch_artist:
final_boxprops = dict(linestyle='solid', edgecolor='black',
facecolor='white', linewidth=1)
else:
final_boxprops = dict(linestyle='-', color='blue')
if boxprops is not None:
final_boxprops.update(boxprops)
# other (cap, whisker) properties
final_whiskerprops = dict(
linestyle='--',
color='blue',
)
final_capprops = dict(
linestyle='-',
color='black',
)
if capprops is not None:
final_capprops.update(capprops)
if whiskerprops is not None:
final_whiskerprops.update(whiskerprops)
# set up the default flier properties
final_flierprops = dict(linestyle='none', marker='+', color='blue')
# flier (outlier) properties
if flierprops is not None:
final_flierprops.update(flierprops)
# median line properties
final_medianprops = dict(linestyle='-', color='red')
if medianprops is not None:
final_medianprops.update(medianprops)
# mean (line or point) properties
if meanline:
final_meanprops = dict(linestyle='--', color='black')
else:
final_meanprops = dict(linestyle='none', markerfacecolor='red',
marker='s')
if meanprops is not None:
final_meanprops.update(meanprops)
def to_vc(xs, ys):
# convert arguments to verts and codes
verts = []
#codes = []
for xi, yi in zip(xs, ys):
verts.append((xi, yi))
verts.append((0, 0)) # ignored
codes = [mpath.Path.MOVETO] + \
[mpath.Path.LINETO] * (len(verts) - 2) + \
[mpath.Path.CLOSEPOLY]
return verts, codes
def patch_list(xs, ys, **kwargs):
verts, codes = to_vc(xs, ys)
path = mpath.Path(verts, codes)
patch = mpatches.PathPatch(path, **kwargs)
self.add_artist(patch)
return [patch]
# vertical or horizontal plot?
if vert:
def doplot(*args, **kwargs):
return self.plot(*args, **kwargs)
def dopatch(xs, ys, **kwargs):
return patch_list(xs, ys, **kwargs)
else:
def doplot(*args, **kwargs):
shuffled = []
for i in xrange(0, len(args), 2):
shuffled.extend([args[i + 1], args[i]])
return self.plot(*shuffled, **kwargs)
def dopatch(xs, ys, **kwargs):
xs, ys = ys, xs # flip X, Y
return patch_list(xs, ys, **kwargs)
# input validation
N = len(bxpstats)
datashape_message = ("List of boxplot statistics and `{0}` "
"values must have same the length")
# check position
if positions is None:
positions = list(xrange(1, N + 1))
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# width
if widths is None:
distance = max(positions) - min(positions)
widths = [min(0.15 * max(distance, 1.0), 0.5)] * N
elif np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# check and save the `hold` state of the current axes
if not self._hold:
self.cla()
holdStatus = self._hold
for pos, width, stats in zip(positions, widths, bxpstats):
# try to find a new label
datalabels.append(stats.get('label', pos))
# fliers coords
flier_x = np.ones(len(stats['fliers'])) * pos
flier_y = stats['fliers']
# whisker coords
whisker_x = np.ones(2) * pos
whiskerlo_y = np.array([stats['q1'], stats['whislo']])
whiskerhi_y = np.array([stats['q3'], stats['whishi']])
# cap coords
cap_left = pos - width * 0.25
cap_right = pos + width * 0.25
cap_x = np.array([cap_left, cap_right])
cap_lo = np.ones(2) * stats['whislo']
cap_hi = np.ones(2) * stats['whishi']
# box and median coords
box_left = pos - width * 0.5
box_right = pos + width * 0.5
med_y = [stats['med'], stats['med']]
# notched boxes
if shownotches:
box_x = [box_left, box_right, box_right, cap_right, box_right,
box_right, box_left, box_left, cap_left, box_left,
box_left]
box_y = [stats['q1'], stats['q1'], stats['cilo'],
stats['med'], stats['cihi'], stats['q3'],
stats['q3'], stats['cihi'], stats['med'],
stats['cilo'], stats['q1']]
med_x = cap_x
# plain boxes
else:
box_x = [box_left, box_right, box_right, box_left, box_left]
box_y = [stats['q1'], stats['q1'], stats['q3'], stats['q3'],
stats['q1']]
med_x = [box_left, box_right]
# maybe draw the box:
if showbox:
if patch_artist:
boxes.extend(dopatch(box_x, box_y, **final_boxprops))
else:
boxes.extend(doplot(box_x, box_y, **final_boxprops))
# draw the whiskers
whiskers.extend(doplot(
whisker_x, whiskerlo_y, **final_whiskerprops
))
whiskers.extend(doplot(
whisker_x, whiskerhi_y, **final_whiskerprops
))
# maybe draw the caps:
if showcaps:
caps.extend(doplot(cap_x, cap_lo, **final_capprops))
caps.extend(doplot(cap_x, cap_hi, **final_capprops))
# draw the medians
medians.extend(doplot(med_x, med_y, **final_medianprops))
# maybe draw the means
if showmeans:
if meanline:
means.extend(doplot(
[box_left, box_right], [stats['mean'], stats['mean']],
**final_meanprops
))
else:
means.extend(doplot(
[pos], [stats['mean']], **final_meanprops
))
# maybe draw the fliers
if showfliers:
fliers.extend(doplot(
flier_x, flier_y, **final_flierprops
))
# fix our axes/ticks up a little
if vert:
setticks = self.set_xticks
setlim = self.set_xlim
setlabels = self.set_xticklabels
else:
setticks = self.set_yticks
setlim = self.set_ylim
setlabels = self.set_yticklabels
if manage_xticks:
newlimits = min(positions) - 0.5, max(positions) + 0.5
setlim(newlimits)
setticks(positions)
setlabels(datalabels)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers, means=means)
@docstring.dedent_interpd
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=None, linewidths=None,
verts=None, **kwargs):
"""
Make a scatter plot of x vs y, where x and y are sequence like objects
of the same lengths.
Parameters
----------
x, y : array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, ), optional, default: 20
size in points^2.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs
(see below). Note that `c` should not be a single numeric RGB or
RGBA sequence because that is indistinguishable from an array of
values to be colormapped. `c` can be a 2-D array in which the
rows are RGB or RGBA, however.
marker : `~matplotlib.markers.MarkerStyle`, optional, default: 'o'
See `~matplotlib.markers` for more information on the different
styles of markers scatter supports.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
A `~matplotlib.colors.Colormap` instance or registered name.
`cmap` is only used if `c` is an array of floats. If None,
defaults to rc `image.cmap`.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. `norm` is only used if `c` is an array of
floats. If `None`, use the default :func:`normalize`.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either is `None`, the min and max of the
color array are used. Note that if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
linewidths : scalar or array_like, optional, default: None
If None, defaults to (lines.linewidth,). Note that this is a
tuple, and if you set the linewidths argument you must set it as a
sequence of floats, as required by
`~matplotlib.collections.RegularPolyCollection`.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Other parameters
----------------
kwargs : `~matplotlib.collections.Collection` properties
Notes
------
Any or all of `x`, `y`, `s`, and `c` may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Examples
--------
.. plot:: mpl_examples/shapes_and_collections/scatter_demo.py
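A minimal, self-contained sketch (the random positions, sizes and
color values below are generated purely for illustration)::

    import numpy as np
    import matplotlib.pyplot as plt

    np.random.seed(0)
    x, y = np.random.rand(2, 50)
    sizes = 1000 * np.random.rand(50)    # marker areas in points^2
    colors = np.random.rand(50)          # values mapped through the colormap
    fig, ax = plt.subplots()
    ax.scatter(x, y, s=sizes, c=colors, alpha=0.5)
    plt.show()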
"""
if not self._hold:
self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x = self.convert_xunits(x)
y = self.convert_yunits(y)
# np.ma.ravel yields an ndarray, not a masked array,
# unless its argument is a masked array.
x = np.ma.ravel(x)
y = np.ma.ravel(y)
if x.size != y.size:
raise ValueError("x and y must be the same size")
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
c_is_stringy = is_string_like(c) or is_sequence_of_strings(c)
if not c_is_stringy:
c = np.asanyarray(c)
if c.size == x.size:
c = np.ma.ravel(c)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
scales = s # Renamed for readability below.
if c_is_stringy:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if c.size == x.size:
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
faceted = kwargs.pop('faceted', None)
edgecolors = kwargs.get('edgecolors', None)
if faceted is not None:
cbook.warn_deprecated(
'1.2', name='faceted', alternative='edgecolor',
obj_type='option')
if faceted:
edgecolors = None
else:
edgecolors = 'none'
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
if not marker_obj.is_filled():
edgecolors = 'face'
offsets = np.dstack((x, y))
collection = mcoll.PathCollection(
(path,), scales,
facecolors=colors,
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=kwargs.pop('transform', self.transData),
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
# The margin adjustment is a hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important.
# Also, only bother with this padding if there is anything to draw.
if self._xmargin < 0.05 and x.size > 0:
self.set_xmargin(0.05)
if self._ymargin < 0.05 and x.size > 0:
self.set_ymargin(0.05)
self.add_collection(collection)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def hexbin(self, x, y, C=None, gridsize=100, bins=None,
xscale='linear', yscale='linear', extent=None,
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function=np.mean, mincnt=None, marginals=False,
**kwargs):
"""
Make a hexagonal binning plot.
Call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=None, linewidths=None, edgecolors='none',
reduce_C_function = np.mean, mincnt=None, marginals=False,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is *None*
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ *None* | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
*mincnt*: [ *None* | a positive integer ]
If not *None*, only display cells with more than *mincnt*
number of points in the cell
*marginals*: [ *True* | *False* ]
if marginals is *True*, plot the marginal density as
colormapped rectangles along the bottom of the x-axis and
left of the y-axis
*extent*: [ *None* | scalars (left, right, bottom, top) ]
The limits of the bins. The default assigns the limits
based on gridsize, x, y, xscale and yscale.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ *None* | Colormap ]
a :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ *None* | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin* / *vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either is *None*, the min and max of the color
array *C* are used. Note that if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar between 0 and 1, or *None*
the alpha value for the patches
*linewidths*: [ *None* | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ *None* | ``'none'`` | mpl color | color sequence ]
If ``'none'``, draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon. If *marginals* is *True*, horizontal
bar and vertical bar (both PolyCollections) will be attached
to the return collection as attributes *hbar* and *vbar*.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
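A minimal, self-contained sketch (the correlated random samples below
are generated purely for illustration) using a log color scale and a
colorbar::

    import numpy as np
    import matplotlib.pyplot as plt

    np.random.seed(0)
    x = np.random.standard_normal(10000)
    y = 2 * x + np.random.standard_normal(10000)
    fig, ax = plt.subplots()
    hb = ax.hexbin(x, y, gridsize=40, bins='log', mincnt=1)
    fig.colorbar(hb, ax=ax)     # counts shown on a log10(i+1) scale
    plt.show()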
"""
if not self._hold:
self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx / math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale == 'log':
if np.any(x <= 0.0):
raise ValueError("x contains non-positive values, so can not"
" be log-scaled")
x = np.log10(x)
if yscale == 'log':
if np.any(y <= 0.0):
raise ValueError("y contains non-positive values, so can not"
" be log-scaled")
y = np.log10(y)
if extent is not None:
xmin, xmax, ymin, ymax = extent
else:
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# to avoid issues with singular data, expand the min/max pairs
xmin, xmax = mtrans.nonsingular(xmin, xmax, expander=0.1)
ymin, ymax = mtrans.nonsingular(ymin, ymax, expander=0.1)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax - xmin) / nx
sy = (ymax - ymin) / ny
if marginals:
xorig = x.copy()
yorig = y.copy()
x = (x - xmin) / sx
y = (y - ymin) / sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1 * ny1 + nx2 * ny2
d1 = (x - ix1) ** 2 + 3.0 * (y - iy1) ** 2
d2 = (x - ix2 - 0.5) ** 2 + 3.0 * (y - iy2 - 0.5) ** 2
bdist = (d1 < d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1 * ny1]
lattice2 = accum[nx1 * ny1:]
lattice1.shape = (nx1, ny1)
lattice2.shape = (nx2, ny2)
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]] += 1
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]] += 1
# threshold
if mincnt is not None:
for i in xrange(nx1):
for j in xrange(ny1):
if lattice1[i, j] < mincnt:
lattice1[i, j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
if lattice2[i, j] < mincnt:
lattice2[i, j] = np.nan
accum = np.hstack((lattice1.astype(float).ravel(),
lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
else:
if mincnt is None:
mincnt = 0
# create accumulation arrays
lattice1 = np.empty((nx1, ny1), dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i, j] = []
lattice2 = np.empty((nx2, ny2), dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i, j] = []
for i in xrange(len(x)):
if bdist[i]:
if ((ix1[i] >= 0) and (ix1[i] < nx1) and
(iy1[i] >= 0) and (iy1[i] < ny1)):
lattice1[ix1[i], iy1[i]].append(C[i])
else:
if ((ix2[i] >= 0) and (ix2[i] < nx2) and
(iy2[i] >= 0) and (iy2[i] < ny2)):
lattice2[ix2[i], iy2[i]].append(C[i])
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i, j]
if len(vals) > mincnt:
lattice1[i, j] = reduce_C_function(vals)
else:
lattice1[i, j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i, j]
if len(vals) > mincnt:
lattice2[i, j] = reduce_C_function(vals)
else:
lattice2[i, j] = np.nan
accum = np.hstack((lattice1.astype(float).ravel(),
lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
offsets = np.zeros((n, 2), float)
offsets[:nx1 * ny1, 0] = np.repeat(np.arange(nx1), ny1)
offsets[:nx1 * ny1, 1] = np.tile(np.arange(ny1), nx1)
offsets[nx1 * ny1:, 0] = np.repeat(np.arange(nx2) + 0.5, ny2)
offsets[nx1 * ny1:, 1] = np.tile(np.arange(ny2), nx2) + 0.5
offsets[:, 0] *= sx
offsets[:, 1] *= sy
offsets[:, 0] += xmin
offsets[:, 1] += ymin
# remove accumulation bins with no data
offsets = offsets[good_idxs, :]
accum = accum[good_idxs]
polygon = np.zeros((6, 2), float)
polygon[:, 0] = sx * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:, 1] = sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
if edgecolors == 'none':
edgecolors = 'face'
if xscale == 'log' or yscale == 'log':
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
if xscale == 'log':
polygons[:, :, 0] = 10.0 ** polygons[:, :, 0]
xmin = 10.0 ** xmin
xmax = 10.0 ** xmax
self.set_xscale(xscale)
if yscale == 'log':
polygons[:, :, 1] = 10.0 ** polygons[:, :, 1]
ymin = 10.0 ** ymin
ymax = 10.0 ** ymax
self.set_yscale(yscale)
collection = mcoll.PolyCollection(
polygons,
edgecolors=edgecolors,
linewidths=linewidths,
)
else:
collection = mcoll.PolyCollection(
[polygon],
edgecolors=edgecolors,
linewidths=linewidths,
offsets=offsets,
transOffset=mtransforms.IdentityTransform(),
offset_position="data"
)
if isinstance(norm, mcolors.LogNorm):
if (accum == 0).any():
# make sure we have no zeros
accum += 1
# autoscale the norm with current accum values if it hasn't
# been set
if norm is not None:
if norm.vmin is None and norm.vmax is None:
norm.autoscale(accum)
# Transform accum if needed
if bins == 'log':
accum = np.log10(accum + 1)
elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins -= 1 # one less edge than bins
bins = minimum + (maximum - minimum) * np.arange(bins) / bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim(corners)
self.autoscale_view(tight=True)
# add the collection last
self.add_collection(collection, autolim=False)
if not marginals:
return collection
if C is None:
C = np.ones(len(x))
def coarse_bin(x, y, coarse):
ind = coarse.searchsorted(x).clip(0, len(coarse) - 1)
mus = np.zeros(len(coarse))
for i in range(len(coarse)):
mu = reduce_C_function(y[ind == i])
mus[i] = mu
return mus
coarse = np.linspace(xmin, xmax, gridsize)
xcoarse = coarse_bin(xorig, C, coarse)
valid = ~np.isnan(xcoarse)
verts, values = [], []
for i, val in enumerate(xcoarse):
thismin = coarse[i]
if i < len(coarse) - 1:
thismax = coarse[i + 1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]:
continue
verts.append([(thismin, 0),
(thismin, 0.05),
(thismax, 0.05),
(thismax, 0)])
values.append(val)
values = np.array(values)
trans = self.get_xaxis_transform(which='grid')
hbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
hbar.set_array(values)
hbar.set_cmap(cmap)
hbar.set_norm(norm)
hbar.set_alpha(alpha)
hbar.update(kwargs)
self.add_collection(hbar, autolim=False)
coarse = np.linspace(ymin, ymax, gridsize)
ycoarse = coarse_bin(yorig, C, coarse)
valid = ~np.isnan(ycoarse)
verts, values = [], []
for i, val in enumerate(ycoarse):
thismin = coarse[i]
if i < len(coarse) - 1:
thismax = coarse[i + 1]
else:
thismax = thismin + np.diff(coarse)[-1]
if not valid[i]:
continue
verts.append([(0, thismin), (0.0, thismax),
(0.05, thismax), (0.05, thismin)])
values.append(val)
values = np.array(values)
trans = self.get_yaxis_transform(which='grid')
vbar = mcoll.PolyCollection(verts, transform=trans, edgecolors='face')
vbar.set_array(values)
vbar.set_cmap(cmap)
vbar.set_norm(norm)
vbar.set_alpha(alpha)
vbar.update(kwargs)
self.add_collection(vbar, autolim=False)
collection.hbar = hbar
collection.vbar = vbar
def on_changed(collection):
hbar.set_cmap(collection.get_cmap())
hbar.set_clim(collection.get_clim())
vbar.set_cmap(collection.get_cmap())
vbar.set_clim(collection.get_clim())
collection.callbacksSM.connect('changed', on_changed)
return collection
@docstring.dedent_interpd
def arrow(self, x, y, dx, dy, **kwargs):
"""
Add an arrow to the axes.
Call signature::
arrow(x, y, dx, dy, **kwargs)
Draws an arrow on the axes from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*). Uses a FancyArrow patch to construct the arrow.
The resulting arrow is affected by the axes aspect ratio and limits.
This may produce an arrow whose head is not square with its stem. To
create an arrow whose head is square with its stem, use
:meth:`annotate` for example::
ax.annotate("", xy=(0.5, 0.5), xytext=(0, 0),
arrowprops=dict(arrowstyle="->"))
Optional kwargs control the arrow construction and properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
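A minimal inline sketch; the axes instance ``ax`` is assumed to
already exist, and ``head_width``/``head_length`` are FancyArrow
keyword arguments::
ax.arrow(0, 0, 0.5, 0.5, head_width=0.05, head_length=0.1)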
"""
# Strip away units for the underlying patch since units
# do not make sense to most patch-like code
x = self.convert_xunits(x)
y = self.convert_yunits(y)
dx = self.convert_xunits(dx)
dy = self.convert_yunits(dy)
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold:
self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, autolim=True)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def stackplot(self, x, *args, **kwargs):
return mstack.stackplot(self, x, *args, **kwargs)
stackplot.__doc__ = mstack.stackplot.__doc__
def streamplot(self, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None, zorder=1):
if not self._hold:
self.cla()
stream_container = mstream.streamplot(self, x, y, u, v,
density=density,
linewidth=linewidth,
color=color,
cmap=cmap,
norm=norm,
arrowsize=arrowsize,
arrowstyle=arrowstyle,
minlength=minlength,
transform=transform,
zorder=zorder)
return stream_container
streamplot.__doc__ = mstream.streamplot.__doc__
@docstring.dedent_interpd
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold:
self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b, autolim=True)
self.autoscale_view()
return b
@docstring.dedent_interpd
def fill(self, *args, **kwargs):
"""
Plot filled polygons.
Call signature::
fill(*args, **kwargs)
*args* is a variable length argument, allowing for multiple
*x*, *y* pairs with an optional color format string; see
:func:`~matplotlib.pyplot.plot` for details on the argument
parsing. For example, to plot a polygon with vertices at *x*,
*y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, e.g., shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/lines_bars_and_markers/fill_demo.py
"""
if not self._hold:
self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch(poly)
patches.append(poly)
self.autoscale_view()
return patches
@docstring.dedent_interpd
def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
**kwargs):
"""
Make filled polygons between two curves.
Call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x* :
An N-length array of the x data
*y1* :
An N-length array (or scalar) of the y data
*y2* :
An N-length array (or scalar) of the y data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``.
*interpolate* :
If *True*, interpolate between the two lines to find the
precise point of intersection. Otherwise, the start and
end points of the filled region will only occur on explicit
values in the *x* array.
*kwargs* :
Keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`.
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between_demo.py
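A minimal sketch, assuming an axes ``ax`` and equal-length arrays
*x*, *y1*, *y2* already exist::
ax.fill_between(x, y1, y2, where=y2 >= y1,
facecolor='green', interpolate=True)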
.. seealso::
:meth:`fill_betweenx`
for filling between two sets of x-values
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = ma.masked_invalid(self.convert_xunits(x))
y1 = ma.masked_invalid(self.convert_yunits(y1))
y2 = ma.masked_invalid(self.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x) * y1
if y2.ndim == 0:
y2 = np.ones_like(x) * y2
if where is None:
where = np.ones(len(x), np.bool)
else:
where = np.asarray(where, np.bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2 * N + 2, 2), np.float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind - 1, 0)
x_values = x[im1:ind + 1]
diff_values = y1[im1:ind + 1] - y2[im1:ind + 1]
y1_values = y1[im1:ind + 1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
diff_root_y = np.interp(diff_root_x, x_values, y1_values)
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N + 1] = end
X[1:N + 1, 0] = xslice
X[1:N + 1, 1] = y1slice
X[N + 2:, 0] = xslice[::-1]
X[N + 2:, 1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection
@docstring.dedent_interpd
def fill_betweenx(self, y, x1, x2=0, where=None, **kwargs):
"""
Make filled polygons between two horizontal curves.
Call signature::
fill_betweenx(y, x1, x2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *x1* and *x2* where
``where==True``
*y* :
An N-length array of the y data
*x1* :
An N-length array (or scalar) of the x data
*x2* :
An N-length array (or scalar) of the x data
*where* :
If *None*, default to fill between everywhere. If not *None*,
it is an N-length numpy boolean array and the fill will
only happen over the regions where ``where==True``.
*kwargs* :
keyword args passed on to the
:class:`~matplotlib.collections.PolyCollection`
kwargs control the :class:`~matplotlib.patches.Polygon` properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_betweenx_demo.py
.. seealso::
:meth:`fill_between`
for filling between two sets of y-values
"""
# Handle united data, such as dates
self._process_unit_info(ydata=y, xdata=x1, kwargs=kwargs)
self._process_unit_info(xdata=x2)
# Convert the arrays so we can work with them
y = ma.masked_invalid(self.convert_yunits(y))
x1 = ma.masked_invalid(self.convert_xunits(x1))
x2 = ma.masked_invalid(self.convert_xunits(x2))
if x1.ndim == 0:
x1 = np.ones_like(y) * x1
if x2.ndim == 0:
x2 = np.ones_like(y) * x2
if where is None:
where = np.ones(len(y), np.bool)
else:
where = np.asarray(where, np.bool)
if not (y.shape == x1.shape == x2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (y, x1, x2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
yslice = y[ind0:ind1]
x1slice = x1[ind0:ind1]
x2slice = x2[ind0:ind1]
if not len(yslice):
continue
N = len(yslice)
Y = np.zeros((2 * N + 2, 2), np.float)
# the purpose of the next two lines is for when x2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the x1 sample points do
Y[0] = x2slice[0], yslice[0]
Y[N + 1] = x2slice[-1], yslice[-1]
Y[1:N + 1, 0] = x1slice
Y[1:N + 1, 1] = yslice
Y[N + 2:, 0] = x2slice[::-1]
Y[N + 2:, 1] = yslice[::-1]
polys.append(Y)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
X1Y = np.array([x1[where], y[where]]).T
X2Y = np.array([x2[where], y[where]]).T
self.dataLim.update_from_data_xy(X1Y, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(X2Y, self.ignore_existing_data_limits,
updatex=True, updatey=False)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection
#### plotting z(x,y): imshow, pcolor and relatives, contour
@docstring.dedent_interpd
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=None, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
Display an image on the axes.
Parameters
-----------
X : array_like, shape (n, m) or (n, m, 3) or (n, m, 4)
Display the image in `X` to current axes. `X` may be a float
array, a uint8 array or a PIL image. If `X` is an array, it
can have the following shapes:
- MxN -- luminance (grayscale, float array only)
- MxNx3 -- RGB (float or uint8 array)
- MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays
should be in the range 0.0 to 1.0; MxN float arrays may be
normalised.
cmap : `~matplotlib.colors.Colormap`, optional, default: None
If None, default to rc `image.cmap` value. `cmap` is ignored when
`X` has RGB(A) information
aspect : ['auto' | 'equal' | scalar], optional, default: None
If 'auto', changes the image aspect ratio to match that of the
axes.
If 'equal', and `extent` is None, changes the axes aspect ratio to
match that of the image. If `extent` is not `None`, the axes
aspect ratio is changed to match that of the extent.
If None, default to rc ``image.aspect`` value.
interpolation : string, optional, default: None
Acceptable values are 'none', 'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser',
'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc',
'lanczos'
If `interpolation` is None, default to rc `image.interpolation`.
See also the `filternorm` and `filterrad` parameters.
If `interpolation` is 'none', then no interpolation is performed
on the Agg, ps and pdf backends. Other backends will fall back to
'nearest'.
norm : `~matplotlib.colors.Normalize`, optional, default: None
A `~matplotlib.colors.Normalize` instance is used to scale
luminance data to 0, 1. If `None`, use the default
:func:`normalize`. `norm` is only used if `X` is an array of
floats.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
alpha : scalar, optional, default: None
The alpha blending value, between 0 (transparent) and 1 (opaque)
origin : ['upper' | 'lower'], optional, default: None
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If None, default to rc `image.origin`.
extent : scalars (left, right, bottom, top), optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
shape : scalars (columns, rows), optional, default: None
For raw buffer images
filternorm : scalar, optional, default: 1
A parameter for the antigrain image resize filter. From the
antigrain documentation, if `filternorm` = 1, the filter
normalizes integer values and corrects the rounding errors. It
doesn't do anything with the source floating point values, it
corrects only integers according to the rule of 1.0 which means
that any sum of pixel weights must be equal to 1.0. So, the
filter function must produce a graph of the proper shape.
filterrad : scalar, optional, default: 4.0
The filter radius for filters that have a radius parameter, i.e.
when interpolation is one of: 'sinc', 'lanczos' or 'blackman'
Returns
--------
image : `~matplotlib.image.AxesImage`
Other parameters
----------------
kwargs : `~matplotlib.artist.Artist` properties.
See also
--------
matshow : Plot a matrix or an array as an image.
Examples
--------
.. plot:: mpl_examples/pylab_examples/image_demo.py
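A minimal sketch, assuming an axes ``ax`` and a 2-D float array
``data`` already exist::
im = ax.imshow(data, interpolation='nearest', origin='lower',
extent=(0, 10, 0, 5), vmin=0, vmax=1)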
"""
if not self._hold:
self.cla()
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
if aspect is None:
aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
self.add_image(im)
return im
@staticmethod
def _pcolorargs(funcname, *args, **kw):
# This takes one kwarg, allmatch.
# If allmatch is True, then the incoming X, Y, C must
# have matching dimensions, taking into account that
# X and Y can be 1-D rather than 2-D. This perfect
# match is required for Gouraud shading. For flat
# shading, X and Y specify boundaries, so we need
# one more boundary than color in each direction.
# For convenience, and consistent with Matlab, we
# discard the last row and/or column of C if necessary
# to meet this condition. This is done if allmatch
# is False.
allmatch = kw.pop("allmatch", False)
if len(args) == 1:
C = args[0]
numRows, numCols = C.shape
if allmatch:
X, Y = np.meshgrid(np.arange(numCols), np.arange(numRows))
else:
X, Y = np.meshgrid(np.arange(numCols + 1),
np.arange(numRows + 1))
return X, Y, C
if len(args) == 3:
X, Y, C = args
numRows, numCols = C.shape
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1, Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
if allmatch:
if not (Nx == numCols and Ny == numRows):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
else:
if not (numCols in (Nx, Nx - 1) and numRows in (Ny, Ny - 1)):
raise TypeError('Dimensions of C %s are incompatible with'
' X (%d) and/or Y (%d); see help(%s)' % (
C.shape, Nx, Ny, funcname))
C = C[:Ny - 1, :Nx - 1]
return X, Y, C
@docstring.dedent_interpd
def pcolor(self, *args, **kwargs):
"""
Create a pseudocolor plot of a 2-D array.
.. note::
pcolor can be very slow for large arrays; consider
using the similar but much faster
:func:`~matplotlib.pyplot.pcolormesh` instead.
Call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
MATLAB.
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='none'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ *None* | ``'none'`` | color | color sequence]
If *None*, the rc setting is used by default.
If ``'none'``, edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
*snap*: bool
Whether to snap the mesh to pixel boundaries.
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the MATLAB convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
is equivalent to::
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand(len(x), len(y))
then you need to transpose C::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
MATLAB :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
.. note::
The default *antialiaseds* is False if the default
*edgecolors*="none" is used. This eliminates artificial lines
at patch boundaries, and works regardless of the value of
alpha. If *edgecolors* is not "none", then the default
*antialiaseds* is taken from
rcParams['patch.antialiased'], which defaults to *True*.
Stroking the edges may be preferred if *alpha* is 1, but
will cause artifacts otherwise.
.. seealso::
:func:`~matplotlib.pyplot.pcolormesh`
For an explanation of the differences between
pcolor and pcolormesh.
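A minimal sketch, assuming an axes ``ax`` and numpy imported as
``np``; note that *C* has one fewer row and column than *X* and *Y*::
X, Y = np.meshgrid(np.arange(5), np.arange(4))
C = np.random.rand(3, 4)
ax.pcolor(X, Y, C, edgecolors='k', linewidths=0.5)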
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if 'shading' in kwargs:
cbook.warn_deprecated(
'1.2', name='shading', alternative='edgecolors',
obj_type='option')
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args, allmatch=False)
Ny, Nx = X.shape
# unit conversion allows e.g. datetime objects as axis values
self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
X = self.convert_xunits(X)
Y = self.convert_yunits(Y)
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X) + ma.getmaskarray(Y)
xymask = (mask[0:-1, 0:-1] + mask[1:, 1:] +
mask[0:-1, 1:] + mask[1:, 0:-1])
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C) + xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask == 0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1, 0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1, 0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:, 0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:, 0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:, 1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:, 1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1, 1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1, 1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:, newaxis], Y1[:, newaxis],
X2[:, newaxis], Y2[:, newaxis],
X3[:, newaxis], Y3[:, newaxis],
X4[:, newaxis], Y4[:, newaxis],
X1[:, newaxis], Y1[:, newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
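# each entry of verts is a closed quadrilateral: the four corners of a
# cell in order, with the first corner repeated to close the polygon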
C = compress(ravelmask, ma.filled(C[0:Ny - 1, 0:Nx - 1]).ravel())
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if shading == 'faceted':
edgecolors = 'k',
else:
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
# aa setting will default via collections to patch.antialiased
# unless the boundary is not stroked, in which case the
# default will be False; with unstroked boundaries, aa
# makes artifacts that are often disturbing.
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and (is_string_like(ec) and
ec.lower() == "none"):
kwargs['antialiaseds'] = False
kwargs.setdefault('snap', False)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([x, y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
x = transformed_pts[..., 0]
y = transformed_pts[..., 1]
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
self.add_collection(collection, autolim=False)
return collection
@docstring.dedent_interpd
def pcolormesh(self, *args, **kwargs):
"""
Plot a quadrilateral mesh.
Call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
Create a pseudocolor plot of a 2-D array.
pcolormesh is similar to :func:`~matplotlib.pyplot.pcolor`,
but uses a different mechanism and returns a different
object; pcolor returns a
:class:`~matplotlib.collections.PolyCollection` but pcolormesh
returns a
:class:`~matplotlib.collections.QuadMesh`. It is much faster,
so it is almost always preferred for large arrays.
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*, use
rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either is *None*, it
is autoscaled to the respective min or max
of the color array *C*. If not *None*, *vmin* or
*vmax* passed in here override any pre-existing values
supplied in the *norm* instance.
*shading*: [ 'flat' | 'gouraud' ]
'flat' indicates a solid color for each quad. When
'gouraud', each quad will be Gouraud shaded. When gouraud
shading, edgecolors is ignored.
*edgecolors*: [*None* | ``'None'`` | ``'face'`` | color |
color sequence]
If *None*, the rc setting is used by default.
If ``'None'``, edges will not be visible.
If ``'face'``, edges will have the same color as the faces.
An mpl color or sequence of colors will set the edge color
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh` properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
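A minimal sketch of Gouraud shading, assuming an axes ``ax`` and
numpy imported as ``np``; with ``shading='gouraud'``, *C* must have
the same shape as *X* and *Y*::
X, Y = np.meshgrid(np.linspace(0, 1, 51), np.linspace(0, 1, 31))
C = np.sin(3 * X) * np.cos(3 * Y)
ax.pcolormesh(X, Y, C, shading='gouraud')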
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat').lower()
antialiased = kwargs.pop('antialiased', False)
kwargs.setdefault('edgecolors', 'None')
allmatch = (shading == 'gouraud')
X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = C.ravel()
X = X.ravel()
Y = Y.ravel()
# unit conversion allows e.g. datetime objects as axis values
self._process_unit_info(xdata=X, ydata=Y, kwargs=kwargs)
X = self.convert_xunits(X)
Y = self.convert_yunits(Y)
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords,
antialiased=antialiased, shading=shading, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_clim(vmin, vmax)
collection.autoscale_None()
self.grid(False)
# Transform from native to data coordinates?
t = collection._transform
if (not isinstance(t, mtransforms.Transform)
and hasattr(t, '_as_mpl_transform')):
t = t._as_mpl_transform(self.axes)
if t and any(t.contains_branch_seperately(self.transData)):
trans_to_data = t - self.transData
pts = np.vstack([X, Y]).T.astype(np.float)
transformed_pts = trans_to_data.transform(pts)
X = transformed_pts[..., 0]
Y = transformed_pts[..., 1]
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
self.add_collection(collection, autolim=False)
return collection
@docstring.dedent_interpd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a pcolor-type method that
provides the fastest possible rendering with the Agg
backend, and that can handle any quadrilateral grid.
It supports only flat shading (no outlines), it lacks
support for log scaling of the axes, and it does not
have a pyplot wrapper.
Call signatures::
ax.pcolorfast(C, **kwargs)
ax.pcolorfast(xr, yr, C, **kwargs)
ax.pcolorfast(x, y, C, **kwargs)
ax.pcolorfast(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``ax.pcolorfast(C, **kwargs)`` is equivalent to
``ax.pcolorfast([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ *None* | Colormap ]
A :class:`matplotlib.colors.Colormap` instance from cm. If *None*,
use rc settings.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to scale
luminance data to 0,1. If *None*, defaults to normalize()
*vmin*/*vmax*: [ *None* | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max
of the color array *C* is used. If you pass a norm instance,
*vmin* and *vmax* will be *None*.
*alpha*: ``0 <= scalar <= 1`` or *None*
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a :class:`~matplotlib.collections.QuadMesh`
collection in the general quadrilateral case.
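A minimal sketch of the three call styles, assuming an axes ``ax``,
a 2-D array ``C`` with shape (nr, nc), and boundary arrays ``x`` and
``y`` of lengths nc + 1 and nr + 1::
ax.pcolorfast(C)                   # cells indexed by row and column
ax.pcolorfast([0, 10], [0, 5], C)  # uniform grid over given ranges
ax.pcolorfast(x, y, C)             # nonuniform rectangular grid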
"""
if not self._hold:
self.cla()
alpha = kwargs.pop('alpha', None)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None:
assert(isinstance(norm, mcolors.Normalize))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01 * np.abs(dx.mean()) and
np.ptp(dy) < 0.01 * np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc + 1
Ny = nr + 1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0, edgecolors="None")
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection, autolim=False)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.add_image(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.add_image(im)
ret = im
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
def contour(self, *args, **kwargs):
if not self._hold:
self.cla()
kwargs['filled'] = False
return mcontour.QuadContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.QuadContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold:
self.cla()
kwargs['filled'] = True
return mcontour.QuadContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.QuadContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
@docstring.dedent_interpd
def table(self, **kwargs):
"""
Add a table to the current axes.
Call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Returns a :class:`matplotlib.table.Table` instance. For finer
grained control over tables, use the
:class:`~matplotlib.table.Table` class and add it to the axes
with :meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
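A minimal sketch, assuming an axes ``ax`` already exists::
ax.table(cellText=[['1', '2'], ['3', '4']],
colLabels=['a', 'b'], loc='bottom')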
"""
return mtable.table(self, **kwargs)
#### Data analysis
@docstring.dedent_interpd
def hist(self, x, bins=10, range=None, normed=False, weights=None,
cumulative=False, bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False,
color=None, label=None, stacked=False,
**kwargs):
"""
Plot a histogram.
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Multiple data can be provided via *x* as a list of datasets
of potentially different length ([*x0*, *x1*, ...]), or as
a 2-D ndarray in which each column is a dataset. Note that
the ndarray form is transposed relative to the list form.
Masked arrays are not supported at present.
Parameters
----------
x : (n,) array or sequence of (n,) arrays
Input values; this takes either a single array or a sequence of
arrays which are not required to be of the same length.
bins : integer or array_like, optional
If an integer is given, `bins + 1` bin edges are returned,
consistently with :func:`numpy.histogram` for numpy version >=
1.3.
Unequally spaced bins are supported if `bins` is a sequence.
default is 10
range : tuple or None, optional
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, `range` is (x.min(), x.max()). Range
has no effect if `bins` is a sequence.
If `bins` is a sequence or `range` is specified, autoscaling
is based on the specified bin range instead of the
range of x.
Default is ``None``
normed : boolean, optional
If `True`, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``, i.e., the integral of the histogram will sum
to 1. If *stacked* is also *True*, the sum of the histograms is
normalized to 1.
Default is ``False``
weights : (n, ) array_like or None, optional
An array of weights, of the same shape as `x`. Each value in `x`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1.
Default is ``None``
cumulative : boolean, optional
If `True`, then a histogram is computed where each bin gives the
counts in that bin plus all bins for smaller values. The last bin
gives the total number of datapoints. If `normed` is also `True`
then the histogram is normalized such that the last bin equals 1.
If `cumulative` evaluates to less than 0 (e.g., -1), the direction
of accumulation is reversed. In this case, if `normed` is also
`True`, then the histogram is normalized such that the first bin
equals 1.
Default is ``False``
bottom : array_like, scalar, or None
Location of the bottom baseline of each bin. If a scalar,
the base line for each bin is shifted by the same amount.
If an array, each bin is shifted independently and the length
of bottom must match the number of bins. If None, defaults to 0.
Default is ``None``
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, optional
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
Default is 'bar'
align : {'left', 'mid', 'right'}, optional
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
Default is 'mid'
orientation : {'horizontal', 'vertical'}, optional
If 'horizontal', `~matplotlib.pyplot.barh` will be used for
bar-type histograms and the *bottom* kwarg will be the left edges.
rwidth : scalar or None, optional
The relative width of the bars as a fraction of the bin width. If
`None`, automatically compute the width.
Ignored if `histtype` is 'step' or 'stepfilled'.
Default is ``None``
log : boolean, optional
If `True`, the histogram axis will be set to a log scale. If `log`
is `True` and `x` is a 1D array, empty bins will be filtered out
and only the non-empty (`n`, `bins`, `patches`) will be returned.
Default is ``False``
color : color or array_like of colors or None, optional
Color spec or sequence of color specs, one per dataset. Default
(`None`) uses the standard line color sequence.
Default is ``None``
label : string or None, optional
String, or sequence of strings to match multiple datasets. Bar
charts yield multiple patches per dataset, but only the first gets
the label, so that the legend command will work as expected.
default is ``None``
stacked : boolean, optional
If `True`, multiple data are stacked on top of each other. If
`False`, multiple data are arranged side by side if histtype is
'bar', or on top of each other if histtype is 'step'.
Default is ``False``
Returns
-------
n : array or list of arrays
The values of the histogram bins. See **normed** and **weights**
for a description of the possible semantics. If input **x** is an
array, then this is an array of length **nbins**. If input is a
sequence of arrays ``[data1, data2, ...]``, then this is a list of
arrays with the values of the histograms for each of the arrays
in the same order.
bins : array
The edges of the bins. Length nbins + 1 (nbins left edges and right
edge of last bin). Always a single array even when multiple data
sets are passed in.
patches : list or list of lists
Silent list of individual patches used to create the histogram
or list of such list if multiple input datasets.
Other Parameters
----------------
kwargs : `~matplotlib.patches.Patch` properties
See also
--------
hist2d : 2D histograms
Notes
-----
Until numpy release 1.5, the underlying numpy histogram function was
incorrect with `normed`=`True` if bin sizes were unequal. MPL
inherited that error. It is now corrected within MPL when using
earlier numpy versions.
Examples
--------
.. plot:: mpl_examples/statistics/histogram_demo_features.py
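A minimal sketch, assuming an axes ``ax`` and numpy imported as
``np``::
data = np.random.randn(1000)
n, bins, patches = ax.hist(data, bins=30, normed=True,
histtype='stepfilled', alpha=0.6)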
"""
if not self._hold:
self.cla()
# xrange becomes range after 2to3
bin_range = range
range = __builtins__["range"]
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in numpy !!!
# Validate string inputs here so we don't have to clutter
# subsequent code.
if histtype not in ['bar', 'barstacked', 'step', 'stepfilled']:
raise ValueError("histtype %s is not recognized" % histtype)
if align not in ['left', 'mid', 'right']:
raise ValueError("align kwarg %s is not recognized" % align)
if orientation not in ['horizontal', 'vertical']:
raise ValueError(
"orientation kwarg %s is not recognized" % orientation)
if histtype == 'barstacked' and not stacked:
stacked = True
# Check whether bins or range are given explicitly.
binsgiven = (cbook.iterable(bins) or bin_range is not None)
# basic input validation
flat = np.ravel(x)
if len(flat) == 0:
raise ValueError("x must have at least one data point")
elif len(flat) == 1 and not binsgiven:
raise ValueError(
"x has only one data point. bins or range kwarg must be given")
# Massage 'x' for processing.
# NOTE: Be sure any changes here is also done below to 'weights'
if isinstance(x, np.ndarray) or not iterable(x[0]):
# TODO: support masked arrays;
x = np.asarray(x)
if x.ndim == 2:
x = x.T # 2-D input with columns as datasets; switch to rows
elif x.ndim == 1:
x = x.reshape(1, x.shape[0]) # new view, single row
else:
raise ValueError("x must be 1D or 2D")
if x.shape[1] < x.shape[0]:
warnings.warn(
'2D hist input should be nsamples x nvariables;\n '
'this looks transposed (shape is %d x %d)' % x.shape[::-1])
else:
# multiple hist with data of different length
x = [np.asarray(xi) for xi in x]
nx = len(x) # number of datasets
if color is None:
color = [six.next(self._get_lines.color_cycle)
for i in xrange(nx)]
else:
color = mcolors.colorConverter.to_rgba_array(color)
if len(color) != nx:
raise ValueError("color kwarg must have one color per dataset")
# We need to do to 'weights' what was done to 'x'
if weights is not None:
if isinstance(weights, np.ndarray) or not iterable(weights[0]):
w = np.array(weights)
if w.ndim == 2:
w = w.T
elif w.ndim == 1:
w.shape = (1, w.shape[0])
else:
raise ValueError("weights must be 1D or 2D")
else:
w = [np.asarray(wi) for wi in weights]
if len(w) != nx:
raise ValueError('weights should have the same shape as x')
for i in xrange(nx):
if len(w[i]) != len(x[i]):
raise ValueError(
'weights should have the same shape as x')
else:
w = [None]*nx
# Save the datalimits for the same reason:
_saved_bounds = self.dataLim.bounds
# If bins are not specified either explicitly or via range,
# we need to figure out the range required for all datasets,
# and supply that to np.histogram.
if not binsgiven:
xmin = np.inf
xmax = -np.inf
for xi in x:
if len(xi) > 0:
xmin = min(xmin, xi.min())
xmax = max(xmax, xi.max())
bin_range = (xmin, xmax)
#hist_kwargs = dict(range=range, normed=bool(normed))
# We will handle the normed kwarg within mpl until we
# get to the point of requiring numpy >= 1.5.
hist_kwargs = dict(range=bin_range)
n = []
mlast = None
for i in xrange(nx):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
m = m.astype(float) # causes problems later if it's an int
if mlast is None:
mlast = np.zeros(len(bins)-1, m.dtype)
if normed and not stacked:
db = np.diff(bins)
m = (m.astype(float) / db) / m.sum()
if stacked:
if mlast is None:
mlast = np.zeros(len(bins)-1, m.dtype)
m += mlast
mlast[:] = m
n.append(m)
if stacked and normed:
db = np.diff(bins)
for m in n:
m[:] = (m.astype(float) / db) / n[-1].sum()
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None, None, -1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
# Save autoscale state for later restoration; turn autoscaling
# off so we can do it all a single time at the end, instead
# of having it done by bar or fill and then having to be redone.
_saved_autoscalex = self.get_autoscalex_on()
_saved_autoscaley = self.get_autoscaley_on()
self.set_autoscalex_on(False)
self.set_autoscaley_on(False)
totwidth = np.diff(bins)
if rwidth is not None:
dr = min(1.0, max(0.0, rwidth))
elif len(n) > 1:
dr = 0.8
else:
dr = 1.0
if histtype == 'bar' and not stacked:
width = dr*totwidth/nx
dw = width
if nx > 1:
boffset = -0.5*dr*totwidth*(1.0-1.0/nx)
else:
boffset = 0.0
stacked = False
elif histtype == 'barstacked' or stacked:
width = dr*totwidth
boffset, dw = 0.0, 0.0
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
if orientation == 'horizontal':
_barfunc = self.barh
bottom_kwarg = 'left'
else: # orientation == 'vertical'
_barfunc = self.bar
bottom_kwarg = 'bottom'
for m, c in zip(n, color):
if bottom is None:
bottom = np.zeros(len(m), np.float)
if stacked:
height = m - bottom
else:
height = m
patch = _barfunc(bins[:-1]+boffset, height, width,
align='center', log=log,
color=c, **{bottom_kwarg: bottom})
patches.append(patch)
if stacked:
bottom[:] = m
boffset += dw
self.set_autoscalex_on(_saved_autoscalex)
self.set_autoscaley_on(_saved_autoscaley)
self.autoscale_view()
elif histtype.startswith('step'):
# these define the perimeter of the polygon
x = np.zeros(4 * len(bins) - 3, np.float)
y = np.zeros(4 * len(bins) - 3, np.float)
x[0:2*len(bins)-1:2], x[1:2*len(bins)-1:2] = bins, bins[:-1]
x[2*len(bins)-1:] = x[1:2*len(bins)-1][::-1]
if bottom is None:
bottom = np.zeros(len(bins)-1, np.float)
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = bottom, bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
if log:
if orientation == 'horizontal':
self.set_xscale('log', nonposx='clip')
logbase = self.xaxis._scale.base
else: # orientation == 'vertical'
self.set_yscale('log', nonposy='clip')
logbase = self.yaxis._scale.base
# Setting a minimum of 0 results in problems for log plots
if normed or weights is not None:
# For normed data, set to log base * minimum data value
# (gives 1 full tick-label unit for the lowest filled bin)
ndata = np.array(n)
minimum = (np.min(ndata[ndata > 0])) / logbase
else:
# For non-normed data, set the min to log base,
# again so that there is 1 full tick-label unit
# for the lowest bin
minimum = 1.0 / logbase
y[0], y[-1] = minimum, minimum
else:
minimum = np.min(bins)
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
# If fill kwarg is set, it will be passed to the patch collection,
# overriding this
fill = (histtype == 'stepfilled')
xvals, yvals = [], []
for m in n:
if stacked:
# starting point for drawing polygon
y[0] = y[1]
# top of the previous polygon becomes the bottom
y[2*len(bins)-1:] = y[1:2*len(bins)-1][::-1]
# set the top of this polygon
y[1:2*len(bins)-1:2], y[2:2*len(bins):2] = (m + bottom,
m + bottom)
if log:
y[y < minimum] = minimum
if orientation == 'horizontal':
xvals.append(y.copy())
yvals.append(x.copy())
else:
xvals.append(x.copy())
yvals.append(y.copy())
if fill:
# add patches in reverse order so that when stacking,
# items lower in the stack are plotted on top of
# items higher in the stack
for x, y, c in reversed(list(zip(xvals, yvals, color))):
patches.append(self.fill(
x, y,
closed=True,
facecolor=c))
else:
for x, y, c in reversed(list(zip(xvals, yvals, color))):
split = 2 * len(bins)
patches.append(self.fill(
x[:split], y[:split],
closed=False, edgecolor=c,
fill=False))
# we return patches, so put it back in the expected order
patches.reverse()
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin0 = max(_saved_bounds[0]*0.9, minimum)
xmax = self.dataLim.intervalx[1]
for m in n:
if np.sum(m) > 0: # make sure there are counts
xmin = np.amin(m[m != 0])
# filter out the 0 height bins
xmin = max(xmin*0.9, minimum)
xmin = min(xmin0, xmin)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin0 = max(_saved_bounds[1]*0.9, minimum)
ymax = self.dataLim.intervaly[1]
for m in n:
if np.sum(m) > 0: # make sure there are counts
ymin = np.amin(m[m != 0])
# filter out the 0 height bins
ymin = max(ymin*0.9, minimum)
ymin = min(ymin0, ymin)
self.dataLim.intervaly = (ymin, ymax)
if label is None:
labels = [None]
elif is_string_like(label):
labels = [label]
else:
labels = [str(lab) for lab in label]
for (patch, lbl) in zip_longest(patches, labels, fillvalue=None):
if patch:
p = patch[0]
p.update(kwargs)
if lbl is not None:
p.set_label(lbl)
p.set_snap(False)
for p in patch[1:]:
p.update(kwargs)
p.set_label('_nolegend_')
if binsgiven:
if orientation == 'vertical':
self.update_datalim(
[(bins[0], 0), (bins[-1], 0)], updatey=False)
else:
self.update_datalim(
[(0, bins[0]), (0, bins[-1])], updatex=False)
if nx == 1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
@docstring.dedent_interpd
def hist2d(self, x, y, bins=10, range=None, normed=False, weights=None,
cmin=None, cmax=None, **kwargs):
"""
Make a 2D histogram plot.
Parameters
----------
x, y: array_like, shape (n, )
Input values
bins: [None | int | [int, int] | array_like | [array, array]]
The bin specification:
- If int, the number of bins for the two dimensions
(nx=ny=bins).
- If [int, int], the number of bins in each dimension
(nx, ny = bins).
- If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
- If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
The default value is 10.
range : array_like shape(2, 2), optional, default: None
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the bins parameters): [[xmin,
xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
normed : boolean, optional, default: False
Normalize histogram.
weights : array_like, shape (n, ), optional, default: None
An array of values w_i weighing each sample (x_i, y_i).
cmin : scalar, optional, default: None
All bins that have a count less than cmin will not be displayed, and
these count values in the returned count histogram will also
be set to nan upon return
cmax : scalar, optional, default: None
All bins that have a count more than cmax will not be displayed (set
to none before passing to imshow), and these count values in the
returned count histogram will also be set to nan upon return
Returns
-------
The return value is ``(counts, xedges, yedges, Image)``.
Other parameters
-----------------
kwargs : :meth:`pcolorfast` properties.
See also
--------
hist : 1D histogram
Notes
-----
Rendering the histogram with a logarithmic color scale is
accomplished by passing a :class:`colors.LogNorm` instance to
the *norm* keyword argument. Likewise, power-law normalization
(similar in effect to gamma correction) can be accomplished with
:class:`colors.PowerNorm`.
Examples
--------
.. plot:: mpl_examples/pylab_examples/hist2d_demo.py
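A minimal sketch, assuming an axes ``ax`` and numpy imported as
``np``::
x = np.random.randn(10000)
y = x + 0.5 * np.random.randn(10000)
counts, xedges, yedges, image = ax.hist2d(x, y, bins=40)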
"""
# xrange becomes range after 2to3
bin_range = range
range = __builtins__["range"]
h, xedges, yedges = np.histogram2d(x, y, bins=bins, range=bin_range,
normed=normed, weights=weights)
if cmin is not None:
h[h < cmin] = None
if cmax is not None:
h[h > cmax] = None
pc = self.pcolorfast(xedges, yedges, h.T, **kwargs)
self.set_xlim(xedges[0], xedges[-1])
self.set_ylim(yedges[0], yedges[-1])
return h, xedges, yedges, pc
@docstring.dedent_interpd
def psd(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
"""
Plot the power spectral density.
Call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, return_line=None, **kwargs)
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`,
with a scaling to correct for power loss due to windowing.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*return_line*: bool
Whether to include the line object plotted in the returned values.
Default is False.
If *return_line* is False, returns the tuple (*Pxx*, *freqs*).
If *return_line* is True, returns the tuple (*Pxx*, *freqs*, *line*):
*Pxx*: 1-D array
The values for the power spectrum `P_{xx}` before scaling
(real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxx*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
Only returned if *return_line* is True.
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
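A minimal inline sketch, assuming an axes ``ax``, numpy imported as
``np``, and a signal sampled at 100 Hz::
t = np.arange(0, 10, 0.01)
s = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(len(t))
Pxx, freqs = ax.psd(s, NFFT=256, Fs=100)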
.. seealso::
:func:`specgram`
:func:`specgram` differs in the default overlap; in not
returning the mean of the segment periodograms; in returning
the times of the segments; and in plotting a colormap instead
of a line.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitude spectrum.
:func:`csd`
:func:`csd` plots the spectral density between two signals.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
pxx, freqs = mlab.psd(x=x, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
line = self.plot(freqs, 10 * np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax - vmin
logi = int(np.log10(intv))
if logi == 0:
logi = .1
step = 10 * logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxx, freqs
else:
return pxx, freqs, line
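    # Illustrative usage sketch for the Welch PSD estimate documented above
    # (not from the original source; assumes ``import numpy as np`` and
    # ``import matplotlib.pyplot as plt``):
    #     fs = 1000.0
    #     t = np.arange(0, 2.0, 1.0 / fs)
    #     x = np.sin(2 * np.pi * 100.0 * t) + 0.5 * np.random.randn(t.size)
    #     fig, ax = plt.subplots()
    #     Pxx, freqs = ax.psd(x, NFFT=256, Fs=fs, noverlap=128)
    # ``Pxx`` holds linear power values; the plotted curve is 10*log10(Pxx).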
@docstring.dedent_interpd
def csd(self, x, y, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, return_line=None, **kwargs):
"""
Plot the cross-spectral density.
Call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, return_line=None, **kwargs)
        The cross spectral density :math:`P_{xy}` is computed by Welch's
        average periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*: 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*return_line*: bool
Whether to include the line object plotted in the returned values.
Default is False.
If *return_line* is False, returns the tuple (*Pxy*, *freqs*).
        If *return_line* is True, returns the tuple (*Pxy*, *freqs*, *line*):
*Pxy*: 1-D array
The values for the cross spectrum `P_{xy}` before scaling
(complex valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxy*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function.
            Only returned if *return_line* is True.
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xy})` for decibels, though `P_{xy}` itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso::
:func:`psd`
:func:`psd` is the equivalent to setting y=x.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
pxy, freqs = mlab.csd(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
line = self.plot(freqs, 10 * np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax - vmin
step = 10 * int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax) + 1, step)
self.set_yticks(ticks)
if return_line is None or not return_line:
return pxy, freqs
else:
return pxy, freqs, line
@docstring.dedent_interpd
def magnitude_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, scale=None,
**kwargs):
"""
Plot the magnitude spectrum.
Call signature::
magnitude_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the magnitude spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to
the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*scale*: [ 'default' | 'linear' | 'dB' ]
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'density',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'linear'.
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the magnitude spectrum before scaling (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`psd`
                :func:`psd` plots the power spectral density.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the angles of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` plots the phase (unwrapped angle) of the
corresponding frequencies.
:func:`specgram`
:func:`specgram` can plot the magnitude spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
if scale is None or scale == 'default':
scale = 'linear'
spec, freqs = mlab.magnitude_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
if scale == 'linear':
Z = spec
yunits = 'energy'
elif scale == 'dB':
Z = 20. * np.log10(spec)
yunits = 'dB'
else:
            raise ValueError('Unknown scale %s' % scale)
lines = self.plot(freqs, Z, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Magnitude (%s)' % yunits)
return spec, freqs, lines[0]
@docstring.dedent_interpd
def angle_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the angle spectrum.
Call signature::
angle_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the angle spectrum (wrapped phase spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the angle spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`magnitude_spectrum`
                :func:`magnitude_spectrum` plots the magnitudes of the
                corresponding frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` plots the unwrapped version of this
function.
:func:`specgram`
:func:`specgram` can plot the angle spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
spec, freqs = mlab.angle_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Angle (radians)')
return spec, freqs, lines[0]
@docstring.dedent_interpd
def phase_spectrum(self, x, Fs=None, Fc=None, window=None,
pad_to=None, sides=None, **kwargs):
"""
Plot the phase spectrum.
Call signature::
phase_spectrum(x, Fs=2, Fc=0, window=mlab.window_hanning,
pad_to=None, sides='default', **kwargs)
Compute the phase spectrum (unwrapped angle spectrum) of *x*.
Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*spectrum*, *freqs*, *line*):
*spectrum*: 1-D array
The values for the phase spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
*line*: a :class:`~matplotlib.lines.Line2D` instance
The line created by this function
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/spectrum_demo.py
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` plots the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` plots the wrapped version of this
function.
:func:`specgram`
:func:`specgram` can plot the phase spectrum of segments
within the signal in a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
spec, freqs = mlab.phase_spectrum(x=x, Fs=Fs, window=window,
pad_to=pad_to, sides=sides)
freqs += Fc
lines = self.plot(freqs, spec, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Phase (radians)')
return spec, freqs, lines[0]
@docstring.dedent_interpd
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
Plot the coherence between *x* and *y*.
Call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the
normalized cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 0 (no overlap).
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold:
self.cla()
cxy, freqs = mlab.cohere(x=x, y=y, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap,
scale_by_freq=scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
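    # Illustrative sketch (not from the original source; assumes two sampled
    # signals ``x`` and ``y`` with sampling frequency ``fs``, and
    # matplotlib.pyplot imported as plt):
    #     fig, ax = plt.subplots()
    #     Cxy, freqs = ax.cohere(x, y, NFFT=256, Fs=fs)
    # Cxy is the normalized cross spectral density |Pxy|^2 / (Pxx * Pyy), so
    # each value lies between 0 and 1; values near 1 indicate a strong linear
    # relationship between x and y at that frequency.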
@docstring.dedent_interpd
def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None,
cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, **kwargs):
"""
Plot a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, mode='default', scale='default',
**kwargs)
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*. The spectrogram is plotted as a colormap
(using imshow).
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*mode*: [ 'default' | 'psd' | 'magnitude' | 'angle' | 'phase' ]
            What sort of spectrum to use. Default is 'psd', which takes
the power spectral density. 'complex' returns the complex-valued
frequency spectrum. 'magnitude' returns the magnitude spectrum.
'angle' returns the phase spectrum without unwrapping. 'phase'
returns the phase spectrum with unwrapping.
*noverlap*: integer
The number of points of overlap between blocks. The
default value is 128.
*scale*: [ 'default' | 'linear' | 'dB' ]
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'psd',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'dB' if *mode* is 'psd' or
'magnitude' and 'linear' otherwise. This must be 'linear'
if *mode* is 'angle' or 'phase'.
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`~matplotlib.mlab.specgram`
*kwargs*:
Additional kwargs are passed on to imshow which makes the
specgram image
.. note::
*detrend* and *scale_by_freq* only apply when *mode* is set to
'psd'
Returns the tuple (*spectrum*, *freqs*, *t*, *im*):
*spectrum*: 2-D array
columns are the periodograms of successive segments
*freqs*: 1-D array
The frequencies corresponding to the rows in *spectrum*
*t*: 1-D array
The times corresponding to midpoints of segments (i.e the columns
in *spectrum*)
*im*: instance of class :class:`~matplotlib.image.AxesImage`
The image created by imshow containing the spectrogram
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; in not returning
times; and in generating a line plot instead of colormap.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'magnitude'. Plots a line instead of a colormap.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'angle'. Plots a line instead of a colormap.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'phase'. Plots a line instead of a colormap.
"""
if not self._hold:
self.cla()
if Fc is None:
Fc = 0
if mode == 'complex':
raise ValueError('Cannot plot a complex specgram')
if scale is None or scale == 'default':
if mode in ['angle', 'phase']:
scale = 'linear'
else:
scale = 'dB'
elif mode in ['angle', 'phase'] and scale == 'dB':
raise ValueError('Cannot use dB scale with angle or phase mode')
spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if scale == 'linear':
Z = spec
elif scale == 'dB':
if mode is None or mode == 'default' or mode == 'psd':
Z = 10. * np.log10(spec)
else:
Z = 20. * np.log10(spec)
else:
            raise ValueError('Unknown scale %s' % scale)
Z = np.flipud(Z)
if xextent is None:
xextent = 0, np.amax(t)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
**kwargs)
self.axis('auto')
return spec, freqs, t, im
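    # Illustrative sketch (not from the original source; assumes a 1-D signal
    # ``x`` sampled at ``fs`` Hz and matplotlib.pyplot imported as plt):
    #     fig, ax = plt.subplots()
    #     spec, freqs, t, im = ax.specgram(x, NFFT=256, Fs=fs, noverlap=128)
    #     fig.colorbar(im, ax=ax)
    # With the default mode ('psd') and scale ('dB'), the image shows
    # 10*log10 of the per-segment power spectral densities.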
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', origin="upper", **kwargs):
"""
        Plot the sparsity pattern of a 2-D array.
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
Parameters
----------
Z : sparse array (n, m)
The array to be plotted.
precision : float, optional, default: 0
If *precision* is 0, any non-zero value will be plotted; else,
values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a special
case: if *precision* is 'present', any value present in the array
will be plotted, even if it is identically zero.
origin : ["upper", "lower"], optional, default: "upper"
Place the [0,0] index of the array in the upper left or lower left
corner of the axes.
aspect : ['auto' | 'equal' | scalar], optional, default: "equal"
If 'equal', and `extent` is None, changes the axes aspect ratio to
match that of the image. If `extent` is not `None`, the axes
aspect ratio is changed to match that of the extent.
If 'auto', changes the image aspect ratio to match that of the
axes.
If None, default to rc ``image.aspect`` value.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
See also
--------
imshow : for image options.
plot : for plotting options
"""
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z) > precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc - 0.5, nr - 0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin=origin, **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z) > precision
y, x = np.nonzero(nonzero)
if marker is None:
marker = 's'
if markersize is None:
markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc - 0.5)
self.set_ylim(ymin=nr - 0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
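    # Illustrative sketch of the two plotting styles described above (not
    # from the original source; assumes scipy.sparse as sparse and pyplot
    # as plt):
    #     M = sparse.rand(50, 50, density=0.05)
    #     fig, (ax1, ax2) = plt.subplots(1, 2)
    #     ax1.spy(M.toarray())                   # image style (dense input)
    #     ax2.spy(M, marker='.', markersize=4)   # marker style (sparse input)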
def matshow(self, Z, **kwargs):
"""
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed, with the first
row at the top. Row and column numbering is zero-based.
Parameters
----------
Z : array_like shape (n, m)
The matrix to be displayed.
Returns
-------
image : `~matplotlib.image.AxesImage`
Other parameters
----------------
kwargs : `~matplotlib.axes.Axes.imshow` arguments
Sets `origin` to 'upper', 'interpolation' to 'nearest' and
'aspect' to equal.
See also
--------
imshow : plot an image
Examples
--------
.. plot:: mpl_examples/pylab_examples/matshow.py
"""
Z = np.asanyarray(Z)
nr, nc = Z.shape
kw = {'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
def violinplot(self, dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
"""Make a violin plot.
Call signature::
violinplot(dataset, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False,
points=100, bw_method=None):
Make a violin plot for each column of *dataset* or each vector in
sequence *dataset*. Each filled area extends to represent the
entire data range, with optional lines at the mean, the median,
the minimum, and the maximum.
Parameters
----------
dataset : Array or a sequence of vectors.
The input data.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
If true, creates a vertical violin plot.
Otherwise, creates a horizontal violin plot.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If `True`, will toggle rendering of the means.
showextrema : bool, default = True
If `True`, will toggle rendering of the extrema.
showmedians : bool, default = False
If `True`, will toggle rendering of the medians.
points : scalar, default = 100
Defines the number of points to evaluate each of the
gaussian kernel density estimations at.
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as its only
parameter and return a scalar. If None (default), 'scott' is used.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the
:class:`matplotlib.collections.PolyCollection` instances
containing the filled area of each violin.
            - ``cmeans``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the mean values of each of the
violin's distribution.
            - ``cmins``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the bottom of each violin's
distribution.
            - ``cmaxes``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the top of each violin's
distribution.
            - ``cbars``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the centers of each violin's
distribution.
            - ``cmedians``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the median values of each of the
violin's distribution.
"""
def _kde_method(X, coords):
kde = mlab.GaussianKDE(X, bw_method)
return kde.evaluate(coords)
vpstats = cbook.violin_stats(dataset, _kde_method, points=points)
return self.violin(vpstats, positions=positions, vert=vert,
widths=widths, showmeans=showmeans,
showextrema=showextrema, showmedians=showmedians)
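    # Illustrative sketch (not from the original source; assumes numpy as np
    # and matplotlib.pyplot as plt). Each entry of ``data`` becomes one
    # violin; ``bw_method`` tunes the kernel density bandwidth:
    #     data = [np.random.normal(0, std, 200) for std in (1, 2, 3)]
    #     fig, ax = plt.subplots()
    #     parts = ax.violinplot(data, showmeans=True, bw_method=0.5)
    #     parts['bodies'][0].set_facecolor('c')
    # The returned dict exposes the artists listed in the docstring
    # (``bodies``, ``cmeans``, ``cmins``, ``cmaxes``, ``cbars``, ...).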
def violin(self, vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
"""Drawing function for violin plots.
Call signature::
violin(vpstats, positions=None, vert=True, widths=0.5,
showmeans=False, showextrema=True, showmedians=False):
Draw a violin plot for each column of `vpstats`. Each filled area
extends to represent the entire data range, with optional lines at the
mean, the median, the minimum, and the maximum.
Parameters
----------
vpstats : list of dicts
A list of dictionaries containing stats for each violin plot.
Required keys are:
- ``coords``: A list of scalars containing the coordinates that
the violin's kernel density estimate were evaluated at.
- ``vals``: A list of scalars containing the values of the
kernel density estimate at each of the coordinates given
in *coords*.
- ``mean``: The mean value for this violin's dataset.
- ``median``: The median value for this violin's dataset.
- ``min``: The minimum value for this violin's dataset.
- ``max``: The maximum value for this violin's dataset.
positions : array-like, default = [1, 2, ..., n]
Sets the positions of the violins. The ticks and limits are
automatically set to match the positions.
vert : bool, default = True.
            If true, plots the violins vertically.
Otherwise, plots the violins horizontally.
widths : array-like, default = 0.5
Either a scalar or a vector that sets the maximal width of
each violin. The default is 0.5, which uses about half of the
available horizontal space.
showmeans : bool, default = False
If true, will toggle rendering of the means.
showextrema : bool, default = True
If true, will toggle rendering of the extrema.
showmedians : bool, default = False
If true, will toggle rendering of the medians.
Returns
-------
result : dict
A dictionary mapping each component of the violinplot to a
list of the corresponding collection instances created. The
dictionary has the following keys:
- ``bodies``: A list of the
:class:`matplotlib.collections.PolyCollection` instances
containing the filled area of each violin.
            - ``cmeans``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the mean values of each of the
violin's distribution.
            - ``cmins``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the bottom of each violin's
distribution.
            - ``cmaxes``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the top of each violin's
distribution.
            - ``cbars``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the centers of each violin's
distribution.
            - ``cmedians``: A
:class:`matplotlib.collections.LineCollection` instance
created to identify the median values of each of the
violin's distribution.
"""
# Statistical quantities to be plotted on the violins
means = []
mins = []
maxes = []
medians = []
# Collections to be returned
artists = {}
N = len(vpstats)
datashape_message = ("List of violinplot statistics and `{0}` "
"values must have the same length")
# Validate positions
if positions is None:
positions = range(1, N + 1)
elif len(positions) != N:
raise ValueError(datashape_message.format("positions"))
# Validate widths
if np.isscalar(widths):
widths = [widths] * N
elif len(widths) != N:
raise ValueError(datashape_message.format("widths"))
# Calculate ranges for statistics lines
pmins = -0.25 * np.array(widths) + positions
pmaxes = 0.25 * np.array(widths) + positions
# Check whether we are rendering vertically or horizontally
if vert:
fill = self.fill_betweenx
perp_lines = self.hlines
par_lines = self.vlines
else:
fill = self.fill_between
perp_lines = self.vlines
par_lines = self.hlines
# Render violins
bodies = []
for stats, pos, width in zip(vpstats, positions, widths):
# The 0.5 factor reflects the fact that we plot from v-p to
# v+p
vals = np.array(stats['vals'])
vals = 0.5 * width * vals / vals.max()
bodies += [fill(stats['coords'],
-vals + pos,
vals + pos,
facecolor='y',
alpha=0.3)]
means.append(stats['mean'])
mins.append(stats['min'])
maxes.append(stats['max'])
medians.append(stats['median'])
artists['bodies'] = bodies
# Render means
if showmeans:
artists['cmeans'] = perp_lines(means, pmins, pmaxes, colors='r')
# Render extrema
if showextrema:
artists['cmaxes'] = perp_lines(maxes, pmins, pmaxes, colors='r')
artists['cmins'] = perp_lines(mins, pmins, pmaxes, colors='r')
artists['cbars'] = par_lines(positions, mins, maxes, colors='r')
# Render medians
if showmedians:
artists['cmedians'] = perp_lines(medians,
pmins,
pmaxes,
colors='r')
return artists
def tricontour(self, *args, **kwargs):
return mtri.tricontour(self, *args, **kwargs)
tricontour.__doc__ = mtri.TriContourSet.tricontour_doc
def tricontourf(self, *args, **kwargs):
return mtri.tricontourf(self, *args, **kwargs)
tricontourf.__doc__ = mtri.TriContourSet.tricontour_doc
def tripcolor(self, *args, **kwargs):
return mtri.tripcolor(self, *args, **kwargs)
tripcolor.__doc__ = mtri.tripcolor.__doc__
def triplot(self, *args, **kwargs):
return mtri.triplot(self, *args, **kwargs)
triplot.__doc__ = mtri.triplot.__doc__
| lgpl-3.0 |
florian-f/sklearn | sklearn/datasets/tests/test_mldata.py | 1 | 5233 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
        # fetch without transposing the data array (transpose_data=False)
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
aewhatley/scikit-learn | benchmarks/bench_multilabel_metrics.py | 86 | 7286 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
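def _direct_benchmark_sketch():
    # Illustrative sketch, not part of the original benchmark script: the
    # ``benchmark`` helper can also be called directly, bypassing argparse.
    # The parameter values below are arbitrary small settings chosen for speed.
    res = benchmark(metrics=(METRICS['f1'], METRICS['hamming']),
                    formats=(FORMATS['dense'], FORMATS['csr']),
                    samples=200, classes=5, density=.2, n_times=2)
    # result shape is (metrics, formats, samples, classes, density)
    assert res.shape == (2, 2, 1, 1, 1)
    return res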
| bsd-3-clause |
nhejazi/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 49 | 3080 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import clean_warning_registry
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_array_equal(chi2.get_support(indices=True), [0])
assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_array_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chi2_unused_feature():
# Unused feature should evaluate to NaN
# and should issue no runtime warning
clean_warning_registry()
with warnings.catch_warnings(record=True) as warned:
warnings.simplefilter('always')
chi, p = chi2([[1, 0], [0, 0]], [1, 0])
for w in warned:
if 'divide by zero' in repr(w):
raise AssertionError('Found unexpected warning %s' % w)
assert_array_equal(chi, [1, np.nan])
assert_array_equal(p[1], np.nan)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
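def _example_chi2_feature_scores():
    # Illustrative sketch, not part of the original test suite: chi2 returns
    # one (statistic, p-value) pair per feature of the toy data above; the
    # constant feature 1 gets the smallest (zero) statistic.
    scores, pvalues = chi2(X, y)
    assert scores.shape == (3,)
    assert pvalues.shape == (3,)
    assert scores.argmin() == 1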
| bsd-3-clause |
nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/numpy/core/function_base.py | 23 | 6891 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y = y * delta
else:
# One might be tempted to use faster, in-place multiplication here,
# but this prevents step from overriding what class is produced,
# and thus prevents, e.g., use of Quantities; see gh-7142.
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
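def _logspace_identity_sketch():
    # Illustrative sketch (not part of numpy): checks the identity stated in
    # the Notes above, logspace(start, stop, num) == power(base, linspace(...)).
    import numpy as np
    y = np.linspace(2.0, 3.0, num=4)
    assert np.allclose(np.logspace(2.0, 3.0, num=4), np.power(10.0, y))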
| bsd-3-clause |
Rocamadour7/ml_tutorial | 02. Regression/main.py | 1 | 1623 | from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random
style.use('fivethirtyeight')
# xs = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)
# ys = np.array([5, 4, 6, 5, 6, 7], dtype=np.float64)
def create_dataset(how_much, variance, step=2, correlation='pos'):
val = 1
ys = []
for i in range(how_much):
y = val + random.randrange(-variance, variance)
ys.append(y)
if correlation and correlation == 'pos':
val += step
elif correlation and correlation == 'neg':
val -= step
xs = [i for i in range(len(ys))]
return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)
def best_fit_slope_and_intercept(xs, ys):
m = ((mean(xs) * mean(ys)) - mean(xs * ys)) / (mean(xs) ** 2 - mean(xs ** 2))
b = mean(ys) - m*mean(xs)
return m, b
def squared_error(ys_original, ys_line):
return sum((ys_line - ys_original)**2)
def coefficient_of_determination(ys_original, ys_line):
y_mean_line = mean(ys_original)
squared_error_regr = squared_error(ys_original, ys_line)
squared_error_y_mean = squared_error(ys_original, y_mean_line)
return 1 - (squared_error_regr / squared_error_y_mean)
xs, ys = create_dataset(40, 10, 2, correlation='pos')
m, b = best_fit_slope_and_intercept(xs, ys)
regression_line = [m * x + b for x in xs]
predict_x = 8
predict_y = m * predict_x + b
r_squared = coefficient_of_determination(ys, regression_line)
print(r_squared)
plt.scatter(xs, ys)
plt.scatter(predict_x, predict_y)
plt.plot(xs, regression_line)
plt.show()
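def _sanity_check_best_fit():
    # Illustrative check added for clarity (values chosen here, not part of
    # the original tutorial): on perfectly linear data the fitted slope and
    # intercept are exact and the coefficient of determination is 1.
    xs_lin = np.array([0, 1, 2, 3, 4], dtype=np.float64)
    ys_lin = 3 * xs_lin + 2
    m_lin, b_lin = best_fit_slope_and_intercept(xs_lin, ys_lin)
    line = [m_lin * x + b_lin for x in xs_lin]
    assert abs(m_lin - 3) < 1e-9 and abs(b_lin - 2) < 1e-9
    assert abs(coefficient_of_determination(ys_lin, line) - 1.0) < 1e-9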
| mit |
dsullivan7/scikit-learn | sklearn/qda.py | 21 | 7639 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
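def _qda_usage_sketch():
    # Illustrative sketch (not part of scikit-learn): fit QDA on the toy data
    # from the class docstring and inspect the class posteriors.
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    clf = QDA().fit(X, y)
    proba = clf.predict_proba(np.array([[-0.8, -1.0]]))
    assert proba.shape == (1, 2)
    assert abs(proba.sum() - 1.0) < 1e-9
    return clf.predict(np.array([[-0.8, -1.0]]))  # expected: array([1])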
| bsd-3-clause |
wzbozon/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
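# Illustration only (not part of the original test module): a minimal,
# self-contained call to spectral_clustering on a precomputed affinity
# matrix with the same two-block structure exercised by the tests above.
def _example_precomputed_affinity():
    S = np.array([[1.0, 1.0, 1.0, 0.1, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.1, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.1, 0.0, 0.0],
                  [0.1, 0.1, 0.1, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0]])
    labels = spectral_clustering(S, n_clusters=2, random_state=0)
    return labels  # two blocks, e.g. [0, 0, 0, 1, 1, 1] up to label swap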
| bsd-3-clause |
GoogleCloudPlatform/mlops-on-gcp | on_demand/kfp-caip-sklearn/lab-03-kfp-cicd/pipeline/covertype_training_pipeline.py | 6 | 7511 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP pipeline orchestrating BigQuery and Cloud AI Platform services."""
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'
HYPERTUNE_SETTINGS = """
{
"hyperparameters": {
"goal": "MAXIMIZE",
"maxTrials": 6,
"maxParallelTrials": 3,
"hyperparameterMetricTag": "accuracy",
"enableTrialEarlyStopping": True,
"params": [
{
"parameterName": "max_iter",
"type": "DISCRETE",
"discreteValues": [500, 1000]
},
{
"parameterName": "alpha",
"type": "DOUBLE",
"minValue": 0.0001,
"maxValue": 0.001,
"scaleType": "UNIT_LINEAR_SCALE"
}
]
}
}
"""
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
"""Prepares the data sampling query."""
sampling_query_template = """
SELECT *
FROM
`{{ source_table }}` AS cover
WHERE
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
"""
query = Template(sampling_query_template).render(
source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
return query
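# Illustration only (not part of the original lab code): rendering the
# template above for a hypothetical source table. The table name used here
# is an assumption, not a resource created by the lab.
def _example_rendered_query():
    query = generate_sampling_query('project.dataset.covertype',
                                    num_lots=10, lots=[1, 2])
    # The rendered SQL is equivalent to:
    #   SELECT * FROM `project.dataset.covertype` AS cover
    #   WHERE MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (1, 2)
    return query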
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
name='Covertype Classifier Training',
    description='The pipeline training and deploying the Covertype classifier'
)
def covertype_train(project_id,
region,
source_table_name,
gcs_root,
dataset_id,
evaluation_metric_name,
evaluation_metric_threshold,
model_id,
version_id,
replace_existing_version,
hypertune_settings=HYPERTUNE_SETTINGS,
dataset_location='US'):
"""Orchestrates training and deployment of an sklearn model."""
# Create the training split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
create_training_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=training_file_path,
dataset_location=dataset_location)
# Create the validation split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[8])
validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
create_validation_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=validation_file_path,
dataset_location=dataset_location)
# Create the testing split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[9])
testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
create_testing_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=testing_file_path,
dataset_location=dataset_location)
# Tune hyperparameters
tune_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
]
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
kfp.dsl.RUN_ID_PLACEHOLDER)
hypertune = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=tune_args,
training_input=hypertune_settings)
# Retrieve the best trial
get_best_trial = retrieve_best_run_op(project_id, hypertune.outputs['job_id'])
# Train the model on a combined training and validation datasets
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
train_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--alpha',
get_best_trial.outputs['alpha'], '--max_iter',
get_best_trial.outputs['max_iter'], '--hptune', 'False'
]
train_model = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=train_args)
# Evaluate the model on the testing split
eval_model = evaluate_model_op(
dataset_path=str(create_testing_split.outputs['output_gcs_path']),
model_path=str(train_model.outputs['job_dir']),
metric_name=evaluation_metric_name)
# Deploy the model if the primary metric is better than threshold
with kfp.dsl.Condition(
eval_model.outputs['metric_value'] > evaluation_metric_threshold):
deploy_model = mlengine_deploy_op(
model_uri=train_model.outputs['job_dir'],
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=RUNTIME_VERSION,
python_version=PYTHON_VERSION,
replace_existing_version=replace_existing_version)
# Configure the pipeline to run using the service account defined
# in the user-gcp-sa k8s secret
if USE_KFP_SA == 'True':
kfp.dsl.get_pipeline_conf().add_op_transformer(use_gcp_secret('user-gcp-sa'))
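# Illustration only (not part of the original pipeline module): compiling the
# pipeline above into a deployable package. The output file name below is an
# arbitrary assumption.
def _example_compile_pipeline():
    kfp.compiler.Compiler().compile(covertype_train,
                                    'covertype_training_pipeline.yaml')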
| apache-2.0 |
e-koch/pyspeckit | pyspeckit/wrappers/fitnh3.py | 1 | 16066 | """
NH3 fitter wrapper
==================
Wrapper to fit ammonia spectra. Generates a reasonable guess at the position
and velocity using a gaussian fit
Example use:
.. code:: python
import pyspeckit
sp11 = pyspeckit.Spectrum('spec.nh3_11.dat', errorcol=999)
sp22 = pyspeckit.Spectrum('spec.nh3_22.dat', errorcol=999)
sp33 = pyspeckit.Spectrum('spec.nh3_33.dat', errorcol=999)
sp11.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['oneone']
sp22.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['twotwo']
sp33.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['threethree']
input_dict={'oneone':sp11, 'twotwo':sp22, 'threethree':sp33}
spf = pyspeckit.wrappers.fitnh3.fitnh3tkin(input_dict)
Note that if you want to use the plotter wrapper with cubes, you need to do
something like the following, where the ``plot_special`` method of the stacked
``cubes`` object is set to the ``plotter_override`` function defined in the
fitnh3_wrapper code:
.. code:: python
cubes.plot_special = pyspeckit.wrappers.fitnh3.plotter_override
cubes.plot_special_kwargs = {'fignum':3, 'vrange':[55,135]}
cubes.plot_spectrum(160,99)
"""
from __future__ import print_function
import warnings
from astropy.extern.six.moves import xrange
from astropy.extern.six import iteritems
import pyspeckit
from .. import spectrum
from ..spectrum.classes import Spectrum, Spectra
from ..spectrum import units
from ..spectrum.models import ammonia_constants
import numpy as np
import copy
import random
from astropy import log
from astropy import units as u
pyspeckit.spectrum.fitters.default_Registry.add_fitter('ammonia_tau_thin',
pyspeckit.spectrum.models.ammonia.ammonia_model_vtau_thin(),
5)
title_dict = {'oneone':'NH$_3(1, 1)$', 'twotwo':'NH$_3(2, 2)$',
'threethree':'NH$_3(3, 3)$', 'fourfour':'NH$_3(4, 4)$',
'fivefive':'NH$_3(5, 5)$', 'sixsix':'NH$_3(6, 6)$',
'sevenseven':'NH$_3(7, 7)$', 'eighteight':'NH$_3(8, 8)$',
}
def fitnh3tkin(input_dict, dobaseline=True, baselinekwargs={}, crop=False,
cropunit=None, guessline='twotwo', tex=15, trot=20, column=15.0,
fortho=0.66, tau=None, thin=False, quiet=False, doplot=True,
fignum=1, guessfignum=2, smooth=False, scale_keyword=None,
rebase=False, tkin=None, npeaks=1, guesses=None,
fittype='ammonia',
guess_error=True, plotter_wrapper_kwargs={}, **kwargs):
"""
Given a dictionary of filenames and lines, fit them together
e.g. {'oneone':'G000.000+00.000_nh3_11.fits'}
Parameters
----------
input_dict : dict
A dictionary in which the keys are the ammonia line names (e.g.,
'oneone', 'twotwo', etc) and the values are either Spectrum objects
or filenames of spectra
dobaseline : bool
Fit and subtract a baseline prior to fitting the model?
Keyword arguments to `pyspeckit.spectrum.Spectrum.baseline` are
specified in ``baselinekwargs``.
baselinekwargs : dict
The keyword arguments for the baseline
crop : bool or tuple
A range of values to crop the spectrum to. The units are specified by
``cropunit``; the default ``None`` will use pixels. If False, no
cropping will be performed.
cropunit : None or astropy unit
The unit for the crop parameter
guess_error : bool
Use the guess line to estimate the error in all spectra?
plotter_wrapper_kwargs : dict
Keyword arguments to pass to the plotter
fittype: 'ammonia' or 'cold_ammonia'
The fitter model to use. This is overridden if `tau` is specified,
in which case one of the `ammonia_tau` models is used (see source code)
"""
if tkin is not None:
if trot == 20 or trot is None:
trot = tkin
else:
raise ValueError("Please specify trot, not tkin")
warnings.warn("Keyword 'tkin' is deprecated; use trot instead", DeprecationWarning)
spdict = dict([(linename, Spectrum(value, scale_keyword=scale_keyword))
if type(value) is str else (linename, value)
for linename, value in iteritems(input_dict)
])
splist = spdict.values()
for transition, sp in spdict.items(): # required for plotting, cropping
sp.xarr.convert_to_unit('km/s', velocity_convention='radio',
refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
quiet=True)
if crop and len(crop) == 2:
for sp in splist:
sp.crop(*crop, unit=cropunit)
if dobaseline:
for sp in splist:
sp.baseline(**baselinekwargs)
if smooth and type(smooth) is int:
for sp in splist:
sp.smooth(smooth)
spdict[guessline].specfit(fittype='gaussian', negamp=False, vheight=False,
guesses='moments')
ampguess, vguess, widthguess = spdict[guessline].specfit.modelpars
if widthguess < 0:
raise ValueError("Width guess was < 0. This is impossible.")
print("RMS guess (errspec): ", spdict[guessline].specfit.errspec.mean())
print("RMS guess (residuals): ", spdict[guessline].specfit.residuals.std())
errguess = spdict[guessline].specfit.residuals.std()
if rebase:
# redo baseline subtraction excluding the centroid +/- about 20 km/s
vlow = spdict[guessline].specfit.modelpars[1]-(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
vhigh = spdict[guessline].specfit.modelpars[1]+(19.8+spdict[guessline].specfit.modelpars[2]*2.35)
for sp in splist:
sp.baseline(exclude=[vlow, vhigh], **baselinekwargs)
for sp in splist:
if guess_error:
sp.error[:] = errguess
sp.xarr.convert_to_unit(u.GHz)
if doplot:
spdict[guessline].plotter(figure=guessfignum)
spdict[guessline].specfit.plot_fit()
spectra = Spectra(splist)
spectra.specfit.npeaks = npeaks
if tau is not None:
if guesses is None:
guesses = [a for i in xrange(npeaks) for a in
(trot+random.random()*i, tex, tau+random.random()*i,
widthguess+random.random()*i, vguess+random.random()*i,
fortho)]
fittype = 'ammonia_tau_thin' if thin else 'ammonia_tau'
spectra.specfit(fittype=fittype, quiet=quiet, guesses=guesses,
**kwargs)
else:
if guesses is None:
guesses = [a for i in xrange(npeaks) for a in
(trot+random.random()*i, tex, column+random.random()*i,
widthguess+random.random()*i, vguess+random.random()*i,
fortho)]
if thin:
raise ValueError("'thin' keyword not supported for the generic ammonia model")
spectra.specfit(fittype=fittype, quiet=quiet, guesses=guesses,
**kwargs)
if doplot:
plot_nh3(spdict, spectra, fignum=fignum, **plotter_wrapper_kwargs)
return spdict, spectra
def plot_nh3(spdict, spectra, fignum=1, show_components=False,
residfignum=None, show_hyperfine_components=True, annotate=True,
axdict=None, figure=None,
**plotkwargs):
"""
Plot the results from a multi-nh3 fit
spdict needs to be dictionary with form:
'oneone': spectrum,
'twotwo': spectrum,
etc.
"""
from matplotlib import pyplot
if figure is None:
spectra.plotter.figure = pyplot.figure(fignum)
spectra.plotter.axis = spectra.plotter.figure.gca()
splist = spdict.values()
for transition, sp in spdict.items():
sp.xarr.convert_to_unit('km/s', velocity_convention='radio',
refX=pyspeckit.spectrum.models.ammonia.freq_dict[transition]*u.Hz,
quiet=True)
try:
sp.specfit.fitter = copy.copy(spectra.specfit.fitter)
sp.specfit.fitter.npeaks = spectra.specfit.npeaks
except AttributeError:
pass
sp.specfit.modelpars = spectra.specfit.modelpars
sp.specfit.parinfo = spectra.specfit.parinfo
sp.specfit.npeaks = spectra.specfit.npeaks
if spectra.specfit.modelpars is not None:
sp.specfit.model = sp.specfit.fitter.n_ammonia(pars=spectra.specfit.modelpars, parnames=spectra.specfit.fitter.parnames)(sp.xarr)
if axdict is None:
axdict = make_axdict(splist, spdict)
for linename, sp in iteritems(spdict):
if linename not in axdict:
raise NotImplementedError("Plot windows for {0} cannot "
"be automatically arranged (yet)."
.format(linename))
sp.plotter.axis=axdict[linename] # permanent
sp.plotter(axis=axdict[linename], title=title_dict[linename], **plotkwargs)
sp.specfit.Spectrum.plotter = sp.plotter
sp.specfit.selectregion(reset=True)
if sp.specfit.modelpars is not None:
sp.specfit.plot_fit(annotate=False, show_components=show_components,
show_hyperfine_components=show_hyperfine_components)
if spdict['oneone'].specfit.modelpars is not None and annotate:
spdict['oneone'].specfit.annotate(labelspacing=0.05,
prop={'size':'small',
'stretch':'extra-condensed'},
frameon=False)
if residfignum is not None:
pyplot.figure(residfignum)
pyplot.clf()
axdict = make_axdict(splist, spdict)
for linename, sp in iteritems(spdict):
sp.specfit.plotresiduals(axis=axdict[linename])
def make_axdict(splist, spdict):
from matplotlib import pyplot
axdict = {}
if len(splist) == 2:
ii = 1
for linename in ammonia_constants.line_names:
if linename in spdict:
axdict[linename] = pyplot.subplot(2,1,ii)
ii+=1
elif len(splist) == 3:
ii = 1
for linename in ammonia_constants.line_names:
if linename in spdict:
if ii == 1:
axdict[linename] = pyplot.subplot(2,1,ii)
ii+=2
else:
axdict[linename] = pyplot.subplot(2,2,ii)
ii+=1
elif len(splist) == 4:
ii = 1
for linename in ammonia_constants.line_names:
if linename in spdict:
axdict[linename] = pyplot.subplot(2,2,ii)
ii+=1
else:
raise NotImplementedError("Plots with {0} subplots are not yet "
"implemented. Pull requests are "
"welcome!".format(len(splist)))
return axdict
def fitnh3(spectrum, vrange=[-100, 100], vrangeunit='km/s', quiet=False, Tex=20,
trot=15, column=1e15, fortho=1.0, tau=None, Tkin=None,
fittype='ammonia',
spec_convert_kwargs={}):
if Tkin is not None:
if trot == 20 or trot is None:
trot = Tkin
else:
raise ValueError("Please specify trot, not Tkin")
warnings.warn("Keyword 'Tkin' is deprecated; use trot instead", DeprecationWarning)
if vrange:
spectrum.xarr.convert_to_unit(vrangeunit, **spec_convert_kwargs)
spectrum.crop(*vrange, unit=vrangeunit)
spectrum.specfit(fittype='gaussian', negamp=False, guesses='moments')
ampguess, vguess, widthguess = spectrum.specfit.modelpars
if tau is None:
spectrum.specfit(fittype=fittype, quiet=quiet,
guesses=[Tex, trot, column, widthguess, vguess,
fortho])
else:
spectrum.specfit(fittype='ammonia_tau', quiet=quiet,
guesses=[Tex, trot, tau, widthguess, vguess, fortho])
return spectrum
def BigSpectrum_to_NH3dict(sp, vrange=None):
"""
A rather complicated way to make the spdicts above given a spectrum...
"""
sp.xarr.convert_to_unit('GHz')
spdict = {}
for linename, freq in iteritems(spectrum.models.ammonia.freq_dict):
if not hasattr(freq, 'unit'):
freq = freq*u.Hz
if vrange is not None:
freq_test_low = freq - freq * vrange[0]/units.speedoflight_kms
freq_test_high = freq - freq * vrange[1]/units.speedoflight_kms
else:
freq_test_low = freq_test_high = freq
log.debug("line {2}: freq test low, high: {0}, {1}"
.format(freq_test_low, freq_test_high, linename))
if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
sp.xarr.as_unit('Hz').in_range(freq_test_high)):
spdict[linename] = sp.copy(deep=True)
spdict[linename].xarr.convert_to_unit('GHz')
assert np.all(np.array(spdict[linename].xarr == sp.xarr,
dtype='bool'))
spdict[linename].xarr.refX = freq
spdict[linename].xarr.convert_to_unit('km/s',
velocity_convention='radio',
refX=pyspeckit.spectrum.models.ammonia.freq_dict[linename]*u.Hz,
quiet=True)
np.testing.assert_array_almost_equal(spdict[linename].xarr.as_unit('GHz').value,
sp.xarr.value)
log.debug("Line {0}={2}: {1}".format(linename, spdict[linename],
freq))
if vrange is not None:
try:
spdict[linename] = spdict[linename].slice(start=vrange[0],
stop=vrange[1],
unit='km/s')
log.debug("Successfully cropped {0} to {1}, freq = {2}, {3}"
.format(linename, vrange, freq,
spdict[linename].xarr))
if len(spdict[linename]) == 0:
spdict.pop(linename)
log.debug("Removed {0} from spdict".format(linename))
except IndexError:
# if the freq in range, but there's no data in range, remove
spdict.pop(linename)
else:
log.debug("Line {0} not in spectrum".format(linename))
# this shouldn't be reachable, but there are reported cases where spdict
# gets populated w/empty spectra, which leads to a failure in producing
# their repr. Since that on its own isn't a very helpful error message,
# we'd rather return the bad spdict and see if the next function down the
# line can survive with a questionable spdict...
try:
log.debug(str(spdict))
except Exception as ex:
log.debug(str(ex))
return spdict
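# Illustration only (not part of the original wrapper): splitting a single
# wide-band spectrum into per-line spectra within +/- 30 km/s. The file name
# is an assumption; any pyspeckit.Spectrum covering several NH3 lines works.
def _example_split_spectrum():
    sp = pyspeckit.Spectrum('nh3_alllines.fits')
    return BigSpectrum_to_NH3dict(sp, vrange=[-30, 30])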
def plotter_override(sp, vrange=None, **kwargs):
"""
Do plot_nh3 with syntax similar to plotter()
"""
spdict = BigSpectrum_to_NH3dict(sp, vrange=vrange)
log.debug("spdict: {0}".format(spdict))
if len(spdict) > 4:
raise ValueError("Too many lines ({0}) found.".format(len(spdict)))
if len(spdict) not in (2, 3, 4):
raise ValueError("Not enough lines; don't need to use the NH3 plot "
"wrapper. If you think you are getting this message "
"incorrectly, check the velocity range (vrange "
"parameter) and make sure your spectrum overlaps with "
" it.")
plot_nh3(spdict, sp, **kwargs)
return spdict
| mit |
datapythonista/pandas | pandas/tests/series/methods/test_is_unique.py | 6 | 1050 | import numpy as np
import pytest
from pandas import Series
from pandas.core.construction import create_series_with_explicit_dtype
@pytest.mark.parametrize(
"data, expected",
[
(np.random.randint(0, 10, size=1000), False),
(np.arange(1000), True),
([], True),
([np.nan], True),
(["foo", "bar", np.nan], True),
(["foo", "foo", np.nan], False),
(["foo", "bar", np.nan, np.nan], False),
],
)
def test_is_unique(data, expected):
# GH#11946 / GH#25180
ser = create_series_with_explicit_dtype(data, dtype_if_empty=object)
assert ser.is_unique is expected
def test_is_unique_class_ne(capsys):
# GH#20661
class Foo:
def __init__(self, val):
self._value = val
def __ne__(self, other):
raise Exception("NEQ not supported")
with capsys.disabled():
li = [Foo(i) for i in range(5)]
ser = Series(li, index=list(range(5)))
ser.is_unique
captured = capsys.readouterr()
assert len(captured.err) == 0
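# Illustration only (not part of the original test module): the behavior
# exercised above, shown on plain Series objects.
def _example_is_unique():
    unique = Series([1, 2, 3]).is_unique          # True
    repeated = Series(["a", "a", "b"]).is_unique  # False
    return unique, repeated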
| bsd-3-clause |
pypot/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
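    # Worked example (added for clarity): with n_features=11,
    # n_features_to_select=3 and step=2,
    #   formula1 -> 1 + ((11 + 2 - 3 - 1) // 2) = 1 + 4 = 5
    #   formula2 -> 1 + ceil((11 - 3) / 2.0)    = 1 + 4 = 5
    # and with step=3 both give 1 + 3 = 4, matching the cases tested below.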
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
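# Illustration only (not part of the original test module): the plain RFE
# workflow the tests above exercise, run on the iris data.
def _example_rfe_usage():
    iris = load_iris()
    selector = RFE(estimator=SVC(kernel="linear"), n_features_to_select=2,
                   step=1)
    selector.fit(iris.data, iris.target)
    return selector.support_, selector.ranking_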
| bsd-3-clause |
madmax983/h2o-3 | h2o-py/h2o/model/dim_reduction.py | 2 | 2505 | from model_base import ModelBase
from metrics_base import *
import imp  # used by screeplot() below to check for matplotlib
class H2ODimReductionModel(ModelBase):
def num_iterations(self):
"""
Get the number of iterations that it took to converge or reach max iterations.
:return: number of iterations (integer)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('number_of_iterations')]
def objective(self):
"""
Get the final value of the objective function from the GLRM model.
:return: final objective value (double)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_objective_value')]
def final_step(self):
"""
Get the final step size from the GLRM model.
:return: final step size (double)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_step_size')]
def archetypes(self):
"""
:return: the archetypes (Y) of the GLRM model.
"""
o = self._model_json["output"]
yvals = o["archetypes"].cell_values
archetypes = []
for yidx, yval in enumerate(yvals):
archetypes.append(list(yvals[yidx])[1:])
return archetypes
def screeplot(self, type="barplot", **kwargs):
"""
Produce the scree plot
:param type: type of plot. "barplot" and "lines" currently supported
:param show: if False, the plot is not shown. matplotlib show method is blocking.
:return: None
"""
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in kwargs.keys() and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib is required for this function!"
return
variances = [s**2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
plt.xlabel('Components')
plt.ylabel('Variances')
plt.title('Scree Plot')
plt.xticks(range(1,len(variances)+1))
if type == "barplot": plt.bar(range(1,len(variances)+1), variances)
elif type == "lines": plt.plot(range(1,len(variances)+1), variances, 'b--')
if not ('server' in kwargs.keys() and kwargs['server']): plt.show() | apache-2.0 |
secimTools/SECIMTools | src/secimtools/dataManager/interface.py | 2 | 19566 | #!/usr/bin/env python
"""
Secim Tools data interface library.
"""
# Built-in packages
import re
import sys
# Add-on packages
import numpy as np
import pandas as pd
class wideToDesign:
""" Class to handle generic data in a wide format with an associated design file. """
def __init__(self, wide, design, uniqID, group=False, runOrder=False, anno=False, clean_string=True,
infer_sampleID=True, keepSample=True, logger=None):
""" Import and set-up data.
Import data both wide formated data and a design file. Set-up basic
attributes.
:Arguments:
wide (TSV): A table in wide format with compounds/genes as rows and
samples as columns.
Name sample1 sample2 sample3
------------------------------------
one 10 20 10
two 10 20 10
design (TSV): A table relating samples ('sampleID') to groups or
treatments.
sampleID group1 group2
-------------------------
sample1 g1 t1
sample2 g1 t1
sample3 g1 t1
uniqID (str): The name of the unique identifier column in 'wide'
(i.e. The column with compound/gene names).
group (str): The name of column names in 'design' that give
group information. For example: treatment
clean_string (bool): If True remove special characters from strings
in dataset.
infer_sampleID (bool): If True infer "sampleID" from different capitalizations.
anno (list): A list of additional annotations that can be used to group
items.
:Returns:
**Attribute**
self.uniqID (str): The name of the unique identifier column in 'wide'
(i.e. The column with compound/gene names).
self.wide (pd.DataFrame): A wide formatted table with compound/gene
as row and sample as columns.
self.sampleIDs (list): A list of sampleIDs. These will correspond
to columns in self.wide.
self.design (pd.DataFrame): A table relating sampleID to groups.
self.group (list): A list of column names in self.design that give
group information. For example: treatment, tissue
anno (list): A list of additional annotations that can be used to group
items.
self.levels (list): A list of levels in self.group. For example:
trt1, tr2, control.
"""
# Setting logger
if logger is None:
self.logger = False
else:
self.logger = logger
# Saving original str
self.origString = dict()
# Import wide formatted data file
try:
self.uniqID = uniqID
self.wide = pd.read_table(wide)
if clean_string:
self.wide[self.uniqID] = self.wide[self.uniqID].apply(lambda x: self._cleanStr(str(x)))
self.wide.rename(columns=lambda x: self._cleanStr(x), inplace=True)
# Make sure index is a string and not numeric
self.wide[self.uniqID] = self.wide[self.uniqID].astype(str)
# Set index to uniqID column
self.wide.set_index(self.uniqID, inplace=True)
except ValueError:
if self.logger:
self.logger.error("Please make sure that your data file has a column called '{0}'.".format(uniqID))
else:
print(("Please make sure that your data file has a column called '{0}'.".format(uniqID)))
raise ValueError
# Import design file
try:
self.design = pd.read_table(design)
# This part of the script allows the user to use any capitalization of "sampleID"
# ie. "sample Id" would be converted to "sampleID".
            # If you want to accept only the exact capitalization, set infer_sampleID to False
## AMM added additional backslash to \s in regex below
if infer_sampleID:
renamed = {column: re.sub(r"[s|S][a|A][m|M][p|P][l|L][e|E][\\s?|_?][I|i][d|D]",
"sampleID", column) for column in self.design.columns}
self.design.rename(columns=renamed, inplace=True)
log_msg = "Inferring 'sampleID' from data. This will accept different capitalizations of the word"
if self.logger:
self.logger.info(log_msg)
else:
print(log_msg)
# Make sure index is a string and not numeric
self.design['sampleID'] = self.design['sampleID'].astype(str)
self.design.set_index('sampleID', inplace=True)
#print(self.design)
# Cleaning design file
if clean_string:
self.design.rename(index=lambda x: self._cleanStr(x), inplace=True)
# Create a list of sampleIDs, but first check that they are present
# in the wide data.
self.sampleIDs = list()
for sample in self.design.index.tolist():
if sample in self.wide.columns:
self.sampleIDs.append(sample)
else:
if self.logger:
self.logger.warn("Sample {0} missing in wide dataset".format(sample))
else:
print(("WARNING - Sample {0} missing in wide dataset".format(sample)))
for sample in self.wide.columns.tolist():
if not (sample in self.design.index):
if keepSample:
if self.logger:
self.logger.warn("Sample {0} missing in design file".format(sample))
else:
print(("WARNING - Sample {0} missing in design file".format(sample)))
else:
if self.logger:
self.logger.error("Sample {0} missing in design file".format(sample))
raise
else:
print(("ERROR - Sample {0} missing in design file".format(sample)))
raise
# Drop design rows that are not in the wide data set
self.design = self.design[self.design.index.isin(self.sampleIDs)]
#print("DEBUG: design")
#print(self.design)
# Removing characters from data!!!!!!(EXPERIMENTAL)
self.wide.replace(r'\D', np.nan, regex=True, inplace=True)
# Possible bad design, bare except should not be used
except SystemError:
print(("Error:", sys.exc_info()[0]))
raise
# Save annotations
self.anno = anno
# Save runOrder
self.runOrder = runOrder
# Set up group information
if group:
if clean_string:
self.group = self._cleanStr(group)
self.design.columns = [self._cleanStr(x) for x in self.design.columns]
else:
self.group = group
keep = self.group.split(",")
# combine group, anno and runorder
if self.runOrder and self.anno:
keep = keep + [self.runOrder, ] + self.anno
elif self.runOrder and not self.anno:
keep = keep + [self.runOrder, ]
elif not self.runOrder and self.anno:
keep = keep + self.anno
# Check if groups, runOrder and levels columns exist in the design file
designCols = self.design.columns.tolist()
if keep == designCols:
# Check if columns exist on design file.
self.design = self.design[keep] # Only keep group columns in the design file
self.design[self.group] = self.design[self.group].astype(str) # Make sure groups are strings
# Create list of group levels
grp = self.design.groupby(self.group)
self.levels = sorted(grp.groups.keys()) # Get a list of group levels
else:
self.group = None
# Keep samples listed in design file
if keepSample:
self.keep_sample(self.sampleIDs)
def _cleanStr(self, x):
""" Clean strings so they behave.
For some modules, uniqIDs and groups cannot contain spaces, '-', '*',
'/', '+', or '()'. For example, statsmodel parses the strings and interprets
them in the model.
:Arguments:
x (str): A string that needs cleaning
:Returns:
x (str): The cleaned string.
self.origString (dict): A dictionary where the key is the new
string and the value is the original string. This will be useful
for reverting back to original values.
"""
if isinstance(x, str):
val = x
x = re.sub(r'^-([0-9].*)', r'__\1', x)
x = x.replace(' ', '_')
x = x.replace('.', '_')
x = x.replace('-', '_')
x = x.replace('*', '_')
x = x.replace('/', '_')
x = x.replace('+', '_')
x = x.replace('(', '_')
x = x.replace(')', '_')
x = x.replace('[', '_')
x = x.replace(']', '_')
x = x.replace('{', '_')
x = x.replace('}', '_')
x = x.replace('"', '_')
x = x.replace('\'', '_')
x = re.sub(r'^([0-9].*)', r'_\1', x)
self.origString[x] = val
return x
def revertStr(self, x):
""" Revert strings back to their original value so they behave well.
Clean strings may need to be reverted back to original values for
convience.
:Arguments:
x (str): A string that needs cleaning
self.origString (dict): A dictionary where the key is the cleaned
string and the value is the original string.
:Returns:
x (str): Original string.
"""
if isinstance(x, str) and x in self.origString:
x = self.origString[x]
return x
def melt(self):
""" Convert a wide formated table to a long formated table.
:Arguments:
self.wide (pd.DataFrame): A wide formatted table with compound/gene
as row and sample as columns.
self.uniqID (str): The name of the unique identifier column in 'wide'
(i.e. The column with compound/gene names).
self.sampleIDs (list): An list of sampleIDs. These will correspond
to columns in self.wide.
:Returns:
**Attributes**
self.long (pd.DataFrame): Creates a new attribute called self.long
that also has group information merged to the dataset.
"""
melted = pd.melt(self.wide.reset_index(), id_vars=self.uniqID, value_vars=self.sampleIDs,
var_name='sampleID')
melted.set_index('sampleID', inplace=True)
self.long = melted.join(self.design).reset_index() # merge on group information using sampleIDs as key
def transpose(self):
""" Transpose the wide table and merge on treatment information.
:Arguments:
self.wide (pd.DataFrame): A wide formatted table with compound/gene
as row and sample as columns.
self.design (pd.DataFrame): A table relating sampleID to groups.
:Returns:
merged (pd.DataFrame): A wide formatted table with sampleID as row
and compound/gene as column. Also has column with group ID.
"""
trans = self.wide[self.sampleIDs].T
# Merge on group information using table index (aka 'sampleID')
merged = trans.join(self.design)
merged.index.name = 'sampleID'
return merged
def getRow(self, ID):
""" Get a row corresponding to a uniqID.
:Arguments:
self.wide (pd.DataFrame): A wide formatted table with compound/gene
as row and sample as columns.
self.uniqID (str): The name of the unique identifier column in 'wide'
(i.e. The column with compound/gene names).
ID (str): A string referring to a uniqID in the dataset.
:Returns:
(pd.DataFrame): with only the corresponding rows from the uniqID.
"""
return self.wide[self.wide[self.uniqID] == ID]
def keep_sample(self, sampleIDs):
"""
Keep only the given sampleIDs in the wide and design file.
:Arguments:
:param list sampleIDs: A list of sampleIDs to keep.
:Returns:
:rtype: wideToDesign
:return: Updates the wideToDesign object to only have those sampleIDs.
"""
self.sampleIDs = sampleIDs
self.wide = self.wide[self.sampleIDs]
self.design = self.design[self.design.index.isin(self.sampleIDs)]
def removeSingle(self):
"""
Removes groups with just one sample
"""
if self.group:
for level, current in self.design.groupby(self.group):
if len(current) < 2:
self.design.drop(current.index, inplace=True)
self.wide.drop(current.index, axis=1, inplace=True)
log_msg = """Your group '{0}' has only one element,"
"this group is going to be removed from"
"further calculations.""".format(level)
if self.logger:
self.logger.warn(log_msg)
else:
print(log_msg)
def dropMissing(self):
"""
Drops rows with missing data
"""
# Asks if any missing value
if np.isnan(self.wide.values).any():
# Count original number of rows
n_rows = len(self.wide.index)
# Drop missing values
self.wide.dropna(inplace=True)
# Count the dropped rows
n_rows_kept = len(self.wide.index)
# Logging!!!
log_msg = """Missing values were found in wide data.
[{0}] rows were dropped""".format(n_rows - n_rows_kept)
if self.logger:
self.logger.warn(log_msg)
else:
print(log_msg)
class annoFormat:
""" Class to handle generic data in a wide format with an associated design file. """
def __init__(self, data, uniqID, mz, rt, anno=False, clean_string=True):
""" Import and set-up data.
Import data both wide formated data and a design file. Set-up basic
attributes.
:Arguments:
wide (TSV): A table in wide format with compounds/genes as rows and
samples as columns.
Name sample1 sample2 sample3
------------------------------------
one 10 20 10
two 10 20 10
design (TSV): A table relating samples ('sampleID') to groups or
treatments.
sampleID group1 group2
-------------------------
sample1 g1 t1
sample2 g1 t1
sample3 g1 t1
uniqID (str): The name of the unique identifier column in 'wide'
(i.e. The column with compound/gene names).
group (str): The name of column names in 'design' that give
group information. For example: treatment
clean_string (bool): If True remove special characters from strings
in dataset.
anno (list): A list of additional annotations that can be used to group
items.
:Returns:
**Attribute**
self.uniqID (str): The name of the unique identifier column in 'wide'
(i.e. The column with compound/gene names).
self.wide (pd.DataFrame): A wide formatted table with compound/gene
as row and sample as columns.
self.sampleIDs (list): A list of sampleIDs. These will correspond
to columns in self.wide.
self.design (pd.DataFrame): A table relating sampleID to groups.
self.group (list): A list of column names in self.design that give
group information. For example: treatment, tissue
anno (list): A list of additional annotations that can be used to group
items.
self.levels (list): A list of levels in self.group. For example:
trt1, tr2, control.
"""
self.origString = dict()
# Import anno formatted data file
try:
self.uniqID = uniqID
self.mz = mz
self.rt = rt
# Trying to import
self.data = pd.read_table(data)
if clean_string:
self.data[self.uniqID] = self.data[self.uniqID].apply(lambda x: self._cleanStr(x))
self.data.rename(columns=lambda x: self._cleanStr(x), inplace=True)
# Make sure index is a string and not numeric
self.data[self.uniqID] = self.data[self.uniqID].astype(str)
# Set index to uniqID column
self.data.set_index(self.uniqID, inplace=True)
# If not annotation then ignoring additional columns
self.anno = None
if not(anno):
self.data = self.data[[self.mz, self.rt]]
else:
self.anno = self.data.columns.tolist()
self.anno.remove(self.mz)
self.anno.remove(self.rt)
except ValueError:
print(("Data file must have columns called '{0}','{1}' and '{2}'.".format(uniqID, mz, rt)))
raise ValueError
def _cleanStr(self, x):
""" Clean strings so they behave.
For some modules, uniqIDs and groups cannot contain spaces, '-', '*',
'/', '+', or '()'. For example, statsmodel parses the strings and interprets
them in the model.
:Arguments:
x (str): A string that needs cleaning
:Returns:
x (str): The cleaned string.
self.origString (dict): A dictionary where the key is the new
string and the value is the original string. This will be useful
for reverting back to original values.
"""
if isinstance(x, str):
val = x
x = x.replace(' ', '_')
x = x.replace('.', '_')
x = x.replace('-', '_')
x = x.replace('*', '_')
x = x.replace('/', '_')
x = x.replace('+', '_')
x = x.replace('(', '_')
x = x.replace(')', '_')
x = x.replace('[', '_')
x = x.replace(']', '_')
x = x.replace('{', '_')
x = x.replace('}', '_')
x = x.replace('"', '_')
x = x.replace('\'', '_')
x = re.sub(r'^([0-9].*)', r'_\1', x)
self.origString[x] = val
return x
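# Illustration only (not part of the original library): a minimal sketch of
# loading a wide/design pair with wideToDesign. The file names and the
# 'rowID'/'treatment' column names below are assumptions, not files shipped
# with SECIMTools.
def _example_wide_to_design():
    dat = wideToDesign("wide.tsv", "design.tsv", uniqID="rowID",
                       group="treatment")
    dat.melt()              # builds dat.long with group info merged in
    return dat.transpose()  # samples as rows, features as columns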
if __name__ == '__main__':
pass
| mit |
chrishavlin/nyc_taxi_viz | src/taxi_main.py | 1 | 12620 | """
taxi_main.py
module for loading the raw csv taxi files.
Copyright (C) 2016 Chris Havlin, <https://chrishavlin.wordpress.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The database is NOT distributed with the code here.
Data source:
NYC Taxi & Limousine Commision, TLC Trip Record Data
<http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml>
"""
"""--------------
Import libraries:
-----------------"""
import numpy as np
import time,os
import matplotlib.pyplot as plt
from matplotlib import cm
import taxi_plotmod as tpm
import datetime as dt
"""---------
Functions
------------"""
def read_all_variables(f,there_is_a_header,VarImportList):
"""
reads in the raw data from a single file
input:
f file object
there_is_a_header logical flag
VarImportList a list of strings identifying which
data to read in and save
possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
'drop_lat','elapsed_time_min'
output:
Vars a 2D array, each row is a single taxi
                           pickup instance, each column is a different variable
Var_list a list of strings where the index of each
entry corresponds to the column of Vars
"""
# count number of lines
indx=0
for line in f:
indx=indx+1
if there_is_a_header:
indx = indx-1
Nlines = indx
# Initizialize Variable Array and List
N_VarImport=len(VarImportList)
Date=np.empty(Nlines,dtype='datetime64[D]')
Vars=np.zeros((indx,N_VarImport))
Var_list=[None] * N_VarImport
# Go back to start of file, loop again to read variables
f.seek(0)
if there_is_a_header:
headerline=f.readline()
indx=0
# loop over lines, store variables
prevprog=0
zero_lines=0
for line in f:
prog= round(float(indx) / float(Nlines-1) * 100)
if prog % 5 == 0 and prog != prevprog and Nlines > 500:
print ' ',int(prog),'% of file read ...'
prevprog=prog
line = line.rstrip()
line = line.split(',')
var_indx = 0
if len(line) == 19:
dates=line[1].split()[0] # the date string, "yyyy-mm-dd"
#dates=dates.split('-')
#dtim=dt.date(int(dates[0]),int(dates[1]),int(dates[2]))
#Date.append(dtim)
Date[indx]=np.datetime64(dates)
if 'pickup_time_hr' in VarImportList:
Vars[indx,var_indx]=datetime_string_to_time(line[1],'hr')
Var_list[var_indx]='pickup_time_hr'
var_indx=var_indx+1
# Vars[indx,var_indx]=np.datetime64(dates)
# Var_list[var_indx]='date'
# var_indx=var_indx+1
if 'dropoff_time_hr' in VarImportList:
Vars[indx,var_indx]=datetime_string_to_time(line[2],'hr')
Var_list[var_indx]='dropoff_time_hr'
var_indx=var_indx+1
if 'dist_mi' in VarImportList:
Vars[indx,var_indx]=float(line[4]) # distance travelled [mi]
Var_list[var_indx]='dist_mi'
var_indx=var_indx+1
if 'elapsed_time_min' in VarImportList:
pickup=datetime_string_to_time(line[1],'hr')*60.0
drop=datetime_string_to_time(line[2],'hr')*60.0
if drop >= pickup:
Vars[indx,var_indx]=drop - pickup
elif drop < pickup:
#print 'whoops:',pickup/60,drop/60,(drop+24*60.-pickup)/60
Vars[indx,var_indx]=drop+24.0*60.0 - pickup
Var_list[var_indx]='elapsed_time_min'
var_indx=var_indx+1
if 'speed_mph' in VarImportList:
pickup=datetime_string_to_time(line[1],'min')
drop=datetime_string_to_time(line[2],'min')
dist=float(line[4]) # [mi]
if drop > pickup:
speed=dist / ((drop - pickup)/60.0) # [mi/hr]
elif drop < pickup:
dT=(drop+24.0*60.0 - pickup)/60.0
speed=dist / dT # [mi/hr]
else:
speed=0
Vars[indx,var_indx]=speed
Var_list[var_indx]='speed_mph'
var_indx=var_indx+1
if 'pickup_lat' in VarImportList:
Vars[indx,var_indx]=float(line[6])
Var_list[var_indx]='pickup_lat'
var_indx=var_indx+1
if 'pickup_lon' in VarImportList:
Vars[indx,var_indx]=float(line[5])
Var_list[var_indx]='pickup_lon'
var_indx=var_indx+1
if 'drop_lat' in VarImportList:
Vars[indx,var_indx]=float(line[10])
Var_list[var_indx]='drop_lat'
var_indx=var_indx+1
if 'drop_lon' in VarImportList:
Vars[indx,var_indx]=float(line[9])
Var_list[var_indx]='drop_lon'
var_indx=var_indx+1
if 'psgger' in VarImportList:
Vars[indx,var_indx]=float(line[3])
                Var_list[var_indx]='psgger'
var_indx=var_indx+1
if 'fare' in VarImportList:
Vars[indx,var_indx]=float(line[12])
Var_list[var_indx]='fare'
var_indx=var_indx+1
if 'tips' in VarImportList:
Vars[indx,var_indx]=float(line[15])
Var_list[var_indx]='tips'
var_indx=var_indx+1
if 'payment_type' in VarImportList:
Vars[indx,var_indx]=float(line[11])
Var_list[var_indx]='payment_type'
var_indx=var_indx+1
indx=indx+1
else:
zero_lines=zero_lines+1
# remove zero lines, which will be padded at end
if zero_lines>0:
Vars=Vars[0:Nlines-zero_lines,:]
Date=Date[0:Nlines-zero_lines]
return Vars,Var_list,Date
def datetime_string_to_time(dt_string,time_units):
""" converts datetime string to time in units of time_units
dt_string should be in datetime format: "yyyy-mm-dd hh:mm:ss"
"2016-04-18 18:31:43"
"""
t_string=dt_string.split()[1] # remove the space, take the time string
t_hms=t_string.split(':') # split into hr, min, sec
# unit conversion factors depending on time_units:
if time_units == 'hr':
a = [1.0, 1.0/60.0, 1.0/3600.0]
elif time_units == 'min':
a = [60.0, 1.0, 1.0/60.0]
elif time_units == 'sec':
a = [3600.0, 60.0, 1.0]
time_flt=float(t_hms[0])*a[0]+float(t_hms[1])*a[1]+float(t_hms[2])*a[2]
return time_flt
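# Worked example (illustrative, using the docstring's sample timestamp):
# datetime_string_to_time("2016-04-18 18:31:43", 'hr')
#   = 18*1.0 + 31*(1.0/60) + 43*(1.0/3600) ~= 18.529 hours
# With time_units='min' the same timestamp gives ~1111.7 minutes.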
def read_taxi_files(dir_base,Vars_To_Import):
""" loops over all taxi files in a directory, stores them in memory
input:
dir_base the directory to look for .csv taxi files
Vars_to_Import a list of strings identifying which data to read in and save
possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
'drop_lat','elapsed_time_min'
output:
VarBig a 2D array, each row is a single taxi pickup instance, each column
is a different variable. Data aggregated from all files in directory.
    Var_list a list of strings where the index of each entry corresponds to the
    column of VarBig
    Dates a 1D numpy datetime64 array giving the pickup date for each row of VarBig
"""
N_files=len(os.listdir(dir_base)) # number of files in directory
ifile = 1 # file counter
Elapsed_tot=0 # time counter
#Dates=[]
for fn in os.listdir(dir_base): # loop over directory contents
if os.path.isfile(dir_base+fn): # is the current path obect a file?
flnm=dir_base + fn # construct the file name
print 'Reading File ', ifile,' of ', N_files
start = time.clock() # start timer
fle = open(flnm, 'r') # open the file for reading
# distribute current file to lat/lon bins:
VarChunk,Var_list,DateChunk=read_all_variables(fle,True,Vars_To_Import)
if ifile == 1:
VarBig = VarChunk
Dates=DateChunk#np.array([tuple(DateChunk)], dtype='datetime64[D]')
print Dates.shape,DateChunk.shape,VarChunk.shape
#Dates.extend(DateChunk)
else:
VarBig = np.vstack((VarBig,VarChunk))
#DateChunk=np.array([tuple(DateChunk)],dtype='datetime64[D]')
print Dates.shape,DateChunk.shape,VarChunk.shape
Dates = np.concatenate((Dates,DateChunk))
#Dates.extend(DateChunk)
elapsed=(time.clock()-start) # elapsed time
Elapsed_tot=Elapsed_tot+elapsed # cumulative elapsed
MeanElapsed=Elapsed_tot/ifile # mean time per file
Fls_left=N_files-(ifile) # files remaining
time_left=Fls_left*MeanElapsed/60 # estimated time left
print ' aggregation took %.1f sec' % elapsed
print ' estimated time remaning: %.1f min' % time_left
fle.close() # close current file
ifile = ifile+1 # increment file counter
return VarBig,Var_list,Dates
def write_gridded_file(write_dir,Var,VarCount,x,y,Varname):
""" writes out the spatially binned data """
if not os.path.exists(write_dir):
os.makedirs(write_dir)
f_base=write_dir+'/'+Varname
np.savetxt(f_base +'.txt', Var, delimiter=',')
np.savetxt(f_base +'_Count.txt', VarCount, delimiter=',')
np.savetxt(f_base+'_x.txt', x, delimiter=',')
np.savetxt(f_base+'_y.txt', y, delimiter=',')
def read_gridded_file(read_dir,Varname):
""" reads in the spatially binned data """
f_base=read_dir+'/'+Varname
Var=np.loadtxt(f_base +'.txt',delimiter=',')
VarCount=np.loadtxt(f_base +'_Count.txt',delimiter=',')
x=np.loadtxt(f_base+'_x.txt',delimiter=',')
y=np.loadtxt(f_base+'_y.txt',delimiter=',')
return Var,VarCount,x,y
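# Illustrative round-trip (hypothetical directory and arrays): read_gridded_file
# expects the same directory and variable name that were passed to
# write_gridded_file, e.g.
#   write_gridded_file('../data_products', DistMean, DistCount, Distx, Disty, 'dist_mi')
#   DistMean2, DistCount2, Distx2, Disty2 = read_gridded_file('../data_products', 'dist_mi')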
def write_taxi_count_speed(write_dir,V1,V1name,V2,V2name,V3,V3name):
""" writes out the spatially binned data """
if not os.path.exists(write_dir):
os.makedirs(write_dir)
f_base=write_dir+'/'
np.savetxt(f_base + V1name + '.txt', V1, delimiter=',')
np.savetxt(f_base + V2name + '.txt', V2, delimiter=',')
np.savetxt(f_base + V3name + '.txt', V3, delimiter=',')
def read_taxi_count_speed(read_dir,Varname):
""" reads in the spatially binned data """
f_base=read_dir+'/'+Varname
Var=np.loadtxt(f_base +'.txt',delimiter=',')
return Var
""" END OF FUNCTIONS """
if __name__ == '__main__':
""" a basic example of reading, processing and plotting some taxi files """
# the directory with the data
dir_base='../data_sub_sampled/'
# choose which variables to import
# possible variables: 'pickup_time_hr','dist_mi','speed_mph','psgger','fare',
# 'tips','payment_type','pickup_lon','pickup_lat','drop_lon',
# 'drop_lat','elapsed_time_min'
Vars_To_Import=['dist_mi','pickup_lon','pickup_lat']
# read in all the data!
    VarBig,Var_list,Dates=read_taxi_files(dir_base,Vars_To_Import)
# now bin the point data!
DistCount,DistMean,Distx,Disty=tpm.map_proc(VarBig,Var_list,'dist_mi',0.1,60,'True',600,700)
write_gridded_file('../data_products/',DistMean,DistCount,Distx,Disty,'dist_mi')
tpm.plt_map(DistCount,1,1000,Distx,Disty,True)
| gpl-3.0 |
zymsys/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hpsModelFrame.py | 22 | 2075 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris, resample
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
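# Single-frame harmonic + residual analysis of a flute note: detect spectral
# peaks, estimate f0 and its harmonics, synthesise the harmonic spectrum (Yh),
# subtract it from the windowed frame to obtain the residual (Xr), and
# approximate the residual magnitude with a downsampled stochastic envelope
# (mYst). The harmonic and residual spectra are then plotted.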
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = int(.8*fs)  # frame centre as an integer sample index (avoids float indexing errors)
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
stocf = .2
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
mYst = resample(np.maximum(-200, mXr), mXr.size*stocf) # decimate the mag spectrum
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(2,1,1)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-100,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', lw=2, markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(2,1,2)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.6, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.0, label='mXr')
binFreq = (fs/2.0)*np.arange(mYst.size)/(mYst.size)
plt.plot(binFreq,mYst,'r', lw=1.5, label='mYst')
plt.axis([0,maxplotfreq,-100,max(mYh)+2])
plt.legend(prop={'size':15})
plt.title('mYh + mXr + mYst')
plt.tight_layout()
plt.savefig('hpsModelFrame.png')
plt.show()
| agpl-3.0 |
icdishb/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 4 | 8628 | """
Testing Recursive feature elimination
"""
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=10, n_jobs=1)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
diff_support = rfe.get_support() == rfe_svc.get_support()
assert_true(sum(diff_support) == len(diff_support))
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X,y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X,y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X,y)
assert_equal(sel.support_.sum(), n_features // 2)
| bsd-3-clause |
linsalrob/EdwardsLab | bin/resample.py | 1 | 1678 | """
Resample 80% of the data and plot a graph of how many new things we see. This is to answer an argument with Geni
"""
import os
import sys
import argparse
import matplotlib.pyplot as plt
from random import shuffle
def resample(size, percent, tries):
    """Repeatedly draw `percent` of a `size`-element dataset and plot the cumulative count of never-before-seen items."""
    if percent > 1:
        percent /= 100.0
# define an array of size size
data = [i for i in range(size)]
# where we put the results as a cumulative total
iterations = []
seen = set()
for t in range(tries):
# randomize the array
shuffle(data)
# see if we have seen percent things
new = 0
resampsize = int(size * percent)
# sys.stderr.write("resampling " + str(resampsize) + " from " + str(size) + "\n")
for i in range(resampsize):
if data[i] not in seen:
seen.add(data[i])
new += 1
if not iterations:
iterations.append(new)
else:
iterations.append(new+iterations[-1])
# now just plot the number of new things as a cumulative total
plt.plot(iterations)
plt.ylabel('New numbers seen')
plt.xlabel('Iteration')
plt.show()
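# Example call (hypothetical numbers): draw 80% of a 1000-element dataset on
# each of 10 iterations; the cumulative count of newly-seen items should
# flatten out as later draws mostly revisit already-seen values.
#   resample(1000, 0.8, 10)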
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Resample a list of numbers to see the new things seen")
parser.add_argument('-s', help='Size of array to resample from (size of dataset)', type=int, required=True)
parser.add_argument('-p', help='Percent to resample at each iteration (float)', type=float, required=True)
parser.add_argument('-i', help='Number of iterations to run', type=int, required=True)
args = parser.parse_args()
resample(args.s, args.p, args.i) | mit |
jameskeaveney/ElecSus | elecsus/libs/RRFittingRoutine.py | 1 | 6567 | # Copyright 2014-2019 M. A. Zentile, J. Keaveney, L. Weller, D. Whiting,
# C. S. Adams and I. G. Hughes.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Random restart fitting routine.
Fit by taking a random sample around parameters and then
fit using Marquardt-Levenberg.
Complete rebuild of the original RR fitting module now using lmfit
Author: JK
Last updated 2018-02-21 MAZ
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import numpy as np
import matplotlib.pyplot as plt
import warnings
import sys
import copy
import psutil
from multiprocessing import Pool
import MLFittingRoutine as ML
import lmfit as lm
from spectra import get_spectra
p_dict_bounds_default = {'lcell':1e-3,'Bfield':100., 'T':20.,
'GammaBuf':20., 'shift':100.,
# Polarisation of light
'theta0':10., 'E_x':0.05, 'E_y':0.05, 'E_phase':0.01,
# B-field angle w.r.t. light k-vector
'Btheta':10*3.14/180, 'Bphi':10*3.14/180,
'DoppTemp':20.,
'rb85frac':1, 'K40frac':1, 'K41frac':1,
}
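# Note: these defaults act as half-widths of the random search window rather
# than hard limits - each restart draws its starting value as
# start_value + uniform(-1, 1) * bound (see RR_fit below).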
def evaluate(args):
data = args[0]
E_in = args[1]
p_dict = args[2]
p_dict_bools = args[3]
data_type = args[4]
best_params, result = ML.ML_fit(data, E_in, p_dict, p_dict_bools, data_type)
#print 'Eval_ML COmplete'
# returns reduced chi-squared value and best fit parameters
return result.redchi, best_params #, result
def RR_fit(data,E_in,p_dict,p_dict_bools,p_dict_bounds=None,no_evals=None,data_type='S0',verbose=False):
"""
Random restart fitting method.
data: an Nx2 iterable for the x and y data to be fitted
E_in: the initial electric field input. See docstring for the spectra.py module for details.
	no_evals: The number of randomly-selected start points for downhill fitting. Defaults to nFitParams**2 + 5, where nFitParams is
the number of varying fit parameters
p_dict: dictionary containing all the calculation (initial) parameters
p_dict_bools: dictionary with the same keys as p_dict, with Boolean values representing each parameter that is to be varied in the fitting
p_dict_bounds: dictionary with the same keys as p_dict, with values that are pairs of min/max values that each parameter can take.
NOTE: this works slightly differently to p_dict_bounds in the other fitting methods. In RR fitting, the bounds
select the range in parameter space that is randomly explored as starting parameters for a downhill fit, rather than being
strict bounds on the fit parameters.
data_type: Data type to fit experimental data to. Can be one of:
'S0', 'S1', 'S2', 'S3', 'Ix', 'Iy', ...
verbose: Boolean - more print statements provided as the program progresses
"""
if p_dict_bounds is None:
p_dict_bounds = p_dict_bounds_default
print('Starting Random Restart Fitting Routine')
x = np.array(data[0])
y = np.array(data[1])
p_dict['E_x'] = E_in[0]
p_dict['E_y'] = E_in[1][0]
p_dict['E_phase'] = E_in[1][1]
# count number of fit parameters
nFitParams = 0
for key in p_dict_bools:
if p_dict_bools[key]: nFitParams += 1
# default number of iterations based on number of fit parameters
if no_evals == None:
no_evals = nFitParams**2 + 5 # 2**(3+2*nFitParams)
# Create random array of starting parameters based on parameter ranges given in p_dict range dictionary
# Scattered uniformly over the parameter space
#clone the parameter dictionary
p_dict_list = []
for i in range(no_evals):
p_dict_list.append(copy.deepcopy(p_dict))
for key in p_dict_bools:
if p_dict_bools[key]==True:
start_vals = p_dict[key]
#print start_vals
for i in range(len(p_dict_list)):
p_dict_list[i][key] = start_vals + np.random.uniform(-1,1) * p_dict_bounds[key]
if verbose:
print('List of initial parameter dictionaries:')
for pd in p_dict_list:
print(pd)
#print p_dict_list
print('\n\n')
#Do parallel ML fitting by utilising multiple cores
po = Pool() # Pool() uses all cores, Pool(3) uses 3 cores for example.
## use lower process priority so computer is still responsive while calculating!!
# parent = psutil.Process()
# parent.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
# for child in parent.children():
# child.nice(psutil.IDLE_PRIORITY_CLASS)
args_list = [(data, E_in, p_dict_list[k], p_dict_bools, data_type) for k in range(no_evals)]
Res = po.map_async(evaluate,args_list)
result = Res.get()
po.close()
po.join()
if verbose: print('RR calculation complete')
#Find best fit
result = np.array(result)
#print result
#result = result.astype(np.float64)
lineMin = np.argmin(result[:,0]) ## pick the fit with the lowest cost value
best_values = result[lineMin][1] # best parameter dictionary
if verbose:
print('\n\n\n')
print(best_values)
p_dict_best = copy.deepcopy(p_dict)
p_dict_best.update(best_values)
# Finally run the ML fitting one more time, using the best parameters
# (so we get the final_result object, which cannot be pickled and therefore isn't supported in multiprocessing)
best_values, final_result = ML.ML_fit(data, E_in, p_dict_best, p_dict_bools, data_type)
# return best fit parameters, and the lmfit result object
return best_values, final_result
def test_fit():
p_dict = {'Elem':'Rb','Dline':'D2','T':80.,'lcell':2e-3,'Bfield':600.,'Btheta':0.,
'Bphi':0.,'GammaBuf':0.,'shift':0.}
# only need to specify parameters that are varied
p_dict_bools = {'T':True,'Bfield':True,'E_x':True}
p_dict_bounds = {'T':10,'Bfield':100,'E_x':0.01}
E_in = np.array([0.7,0.7,0])
E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
print(E_in_angle)
x = np.linspace(-10000,10000,100)
[y] = get_spectra(x,E_in,p_dict,outputs=['S1']) + np.random.randn(len(x))*0.015
data = [x,y.real]
best_params, result = RR_fit(data, E_in_angle, p_dict, p_dict_bools, p_dict_bounds, no_evals = 8, data_type='S1')
report = result.fit_report()
fit = result.best_fit
print(report)
plt.plot(x,y,'ko')
plt.plot(x,fit,'r-',lw=2)
plt.show()
if __name__ == '__main__':
test_fit()
| apache-2.0 |
yonglehou/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Kyle Kastner <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
    def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
bzero/statsmodels | statsmodels/regression/tests/test_regression.py | 18 | 38246 | """
Test functions for models.regression
"""
# TODO: Test for LM
from statsmodels.compat.python import long, lrange
import warnings
import pandas
import numpy as np
from numpy.testing import (assert_almost_equal, assert_approx_equal, assert_,
assert_raises, assert_equal, assert_allclose)
from scipy.linalg import toeplitz
from statsmodels.tools.tools import add_constant, categorical
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker
from statsmodels.datasets import longley
from scipy.stats import t as student_t
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_7 = 7
DECIMAL_0 = 0
class CheckRegressionResults(object):
"""
res2 contains results from Rmodelwrap or were obtained from a statistical
packages such as R, Stata, or SAS and were written to model_results
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_standarderrors = DECIMAL_4
def test_standarderrors(self):
assert_almost_equal(self.res1.bse,self.res2.bse,
self.decimal_standarderrors)
decimal_confidenceintervals = DECIMAL_4
def test_confidenceintervals(self):
#NOTE: stata rounds residuals (at least) to sig digits so approx_equal
conf1 = self.res1.conf_int()
conf2 = self.res2.conf_int()
for i in range(len(conf1)):
assert_approx_equal(conf1[i][0], conf2[i][0],
self.decimal_confidenceintervals)
assert_approx_equal(conf1[i][1], conf2[i][1],
self.decimal_confidenceintervals)
decimal_conf_int_subset = DECIMAL_4
def test_conf_int_subset(self):
if len(self.res1.params) > 1:
ci1 = self.res1.conf_int(cols=(1,2))
ci2 = self.res1.conf_int()[1:3]
assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset)
else:
pass
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_rsquared = DECIMAL_4
def test_rsquared(self):
assert_almost_equal(self.res1.rsquared, self.res2.rsquared,
self.decimal_rsquared)
decimal_rsquared_adj = DECIMAL_4
def test_rsquared_adj(self):
assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj,
self.decimal_rsquared_adj)
def test_degrees(self):
assert_equal(self.res1.model.df_model, self.res2.df_model)
assert_equal(self.res1.model.df_resid, self.res2.df_resid)
decimal_ess = DECIMAL_4
def test_ess(self):
#Explained Sum of Squares
assert_almost_equal(self.res1.ess, self.res2.ess,
self.decimal_ess)
decimal_ssr = DECIMAL_4
def test_sumof_squaredresids(self):
assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr)
decimal_mse_resid = DECIMAL_4
def test_mse_resid(self):
#Mean squared error of residuals
assert_almost_equal(self.res1.mse_model, self.res2.mse_model,
self.decimal_mse_resid)
decimal_mse_model = DECIMAL_4
def test_mse_model(self):
assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid,
self.decimal_mse_model)
decimal_mse_total = DECIMAL_4
def test_mse_total(self):
assert_almost_equal(self.res1.mse_total, self.res2.mse_total,
self.decimal_mse_total, err_msg="Test class %s" % self)
decimal_fvalue = DECIMAL_4
def test_fvalue(self):
#didn't change this, not sure it should complain -inf not equal -inf
#if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)):
assert_almost_equal(self.res1.fvalue, self.res2.fvalue,
self.decimal_fvalue)
decimal_loglike = DECIMAL_4
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)
decimal_pvalues = DECIMAL_4
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
self.decimal_pvalues)
decimal_wresid = DECIMAL_4
def test_wresid(self):
assert_almost_equal(self.res1.wresid, self.res2.wresid,
self.decimal_wresid)
decimal_resids = DECIMAL_4
def test_resids(self):
assert_almost_equal(self.res1.resid, self.res2.resid,
self.decimal_resids)
decimal_norm_resids = DECIMAL_4
def test_norm_resids(self):
assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson,
self.decimal_norm_resids)
#TODO: test fittedvalues and what else?
class TestOLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import Longley
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
res2 = Longley()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
model_qr = OLS(data.endog, data.exog)
Q, R = np.linalg.qr(data.exog)
model_qr.exog_Q, model_qr.exog_R = Q, R
model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
model_qr.rank = np_matrix_rank(R)
res_qr2 = model_qr.fit(method="qr")
cls.res_qr = res_qr
cls.res_qr_manual = res_qr2
def test_eigenvalues(self):
eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals)
eigenval_perc_diff /= self.res_qr.eigenvals
zeros = np.zeros_like(eigenval_perc_diff)
assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7)
# Robust error tests. Compare values computed with SAS
def test_HC0_errors(self):
#They are split up because the copied results do not have any DECIMAL_4
#places for the last place.
assert_almost_equal(self.res1.HC0_se[:-1],
self.res2.HC0_se[:-1], DECIMAL_4)
assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1])
def test_HC1_errors(self):
assert_almost_equal(self.res1.HC1_se[:-1],
self.res2.HC1_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1])
def test_HC2_errors(self):
assert_almost_equal(self.res1.HC2_se[:-1],
self.res2.HC2_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1])
def test_HC3_errors(self):
assert_almost_equal(self.res1.HC3_se[:-1],
self.res2.HC3_se[:-1], DECIMAL_4)
assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])
def test_qr_params(self):
assert_almost_equal(self.res1.params,
self.res_qr.params, 6)
def test_qr_normalized_cov_params(self):
#todo: need assert_close
assert_almost_equal(np.ones_like(self.res1.normalized_cov_params),
self.res1.normalized_cov_params /
self.res_qr.normalized_cov_params, 5)
def test_missing(self):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
data.endog[[3, 7, 14]] = np.nan
mod = OLS(data.endog, data.exog, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
def test_rsquared_adj_overfit(self):
# Test that if df_resid = 0, rsquared_adj = 0.
# This is a regression test for user issue:
# https://github.com/statsmodels/statsmodels/issues/868
with warnings.catch_warnings(record=True):
x = np.random.randn(5)
y = np.random.randn(5, 6)
results = OLS(x, y).fit()
rsquared_adj = results.rsquared_adj
assert_equal(rsquared_adj, np.nan)
def test_qr_alternatives(self):
assert_allclose(self.res_qr.params, self.res_qr_manual.params,
rtol=5e-12)
def test_norm_resid(self):
resid = self.res1.wresid
norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid)
model_norm_resid = self.res1.resid_pearson
assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7)
def test_norm_resid_zero_variance(self):
with warnings.catch_warnings(record=True):
y = self.res1.model.endog
res = OLS(y,y).fit()
assert_allclose(res.scale, 0, atol=1e-20)
assert_allclose(res.wresid, res.resid_pearson, atol=5e-11)
class TestRTO(CheckRegressionResults):
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyRTO
data = longley.load()
res1 = OLS(data.endog, data.exog).fit()
res2 = LongleyRTO()
res2.wresid = res1.wresid # workaround hack
cls.res1 = res1
cls.res2 = res2
res_qr = OLS(data.endog, data.exog).fit(method="qr")
cls.res_qr = res_qr
class TestFtest(object):
"""
Tests f_test vs. RegressionResults
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)[:-1,:]
cls.Ftest = cls.res1.f_test(R)
def test_F(self):
assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4)
def test_p(self):
assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4)
def test_Df_denom(self):
assert_equal(self.Ftest.df_denom, self.res1.model.df_resid)
def test_Df_num(self):
assert_equal(self.Ftest.df_num, 6)
class TestFTest2(object):
"""
A joint test that the coefficient on
GNP = the coefficient on UNEMP and that the coefficient on
POP = the coefficient on YEAR for the Longley dataset.
Ftest1 is from statsmodels. Results are from Rpy using R's car library.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]]
cls.Ftest1 = res1.f_test(R2)
hyp = 'x2 = x3, x5 = x6'
cls.NewFtest1 = res1.f_test(hyp)
def test_new_ftest(self):
assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue)
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 2)
class TestFtestQ(object):
"""
A joint hypothesis test that Rb = q. Coefficient tests are essentially
made up. Test values taken from Stata.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
R = np.array([[0,1,1,0,0,0,0],
[0,1,0,1,0,0,0],
[0,1,0,0,0,0,0],
[0,0,0,0,1,0,0],
[0,0,0,0,0,1,0]])
q = np.array([0,0,0,1,0])
cls.Ftest1 = res1.f_test((R,q))
def test_fvalue(self):
assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5)
def test_pvalue(self):
assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10)
def test_df_denom(self):
assert_equal(self.Ftest1.df_denom, 9)
def test_df_num(self):
assert_equal(self.Ftest1.df_num, 5)
class TestTtest(object):
"""
Test individual t-tests. Ie., are the coefficients significantly
different than zero.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
R = np.identity(7)
cls.Ttest = cls.res1.t_test(R)
hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0'
cls.NewTTest = cls.res1.t_test(hyp)
def test_new_tvalue(self):
assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue)
def test_tvalue(self):
assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest.pvalue, student_t.sf(
np.abs(self.res1.tvalues), self.res1.model.df_resid)*2,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest.df_denom, self.res1.model.df_resid)
def test_effect(self):
assert_almost_equal(self.Ttest.effect, self.res1.params)
class TestTtest2(object):
"""
Tests the hypothesis that the coefficients on POP and YEAR
are equal.
Results from RPy using 'car' package.
"""
@classmethod
def setupClass(cls):
R = np.zeros(7)
R[4:6] = [1,-1]
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
res1 = OLS(data.endog, data.exog).fit()
cls.Ttest1 = res1.t_test(R)
def test_tvalue(self):
assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284,
DECIMAL_4)
def test_sd(self):
assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4)
def test_pvalue(self):
assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246,
DECIMAL_4)
def test_df_denom(self):
assert_equal(self.Ttest1.df_denom, 9)
def test_effect(self):
assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4)
class TestGLS(object):
"""
These test results were obtained by replication with R.
"""
@classmethod
def setupClass(cls):
from .results.results_regression import LongleyGls
data = longley.load()
exog = add_constant(np.column_stack((data.exog[:,1],
data.exog[:,4])), prepend=False)
tmp_results = OLS(data.endog, exog).fit()
rho = np.corrcoef(tmp_results.resid[1:],
tmp_results.resid[:-1])[0][1] # by assumption
order = toeplitz(np.arange(16))
sigma = rho**order
GLS_results = GLS(data.endog, exog, sigma=sigma).fit()
cls.res1 = GLS_results
cls.res2 = LongleyGls()
# attach for test_missing
cls.sigma = sigma
cls.exog = exog
cls.endog = data.endog
def test_aic(self):
assert_approx_equal(self.res1.aic+2, self.res2.aic, 3)
def test_bic(self):
assert_approx_equal(self.res1.bic, self.res2.bic, 2)
def test_loglike(self):
assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0)
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1)
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4)
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4)
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4)
def test_standarderrors(self):
assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
DECIMAL_4)
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)
def test_missing(self):
endog = self.endog.copy() # copy or changes endog for other methods
endog[[4,7,14]] = np.nan
mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop')
assert_equal(mod.endog.shape[0], 13)
assert_equal(mod.exog.shape[0], 13)
assert_equal(mod.sigma.shape, (13,13))
class TestGLS_alt_sigma(CheckRegressionResults):
"""
Test that GLS with no argument is equivalent to OLS.
"""
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
ols_res = OLS(data.endog, data.exog).fit()
gls_res = GLS(data.endog, data.exog).fit()
gls_res_scalar = GLS(data.endog, data.exog, sigma=1)
cls.endog = data.endog
cls.exog = data.exog
cls.res1 = gls_res
cls.res2 = ols_res
cls.res3 = gls_res_scalar
# self.res2.conf_int = self.res2.conf_int()
def test_wrong_size_sigma_1d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1))
def test_wrong_size_sigma_2d(self):
n = len(self.endog)
assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1)))
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2, DECIMAL_4)
class TestLM(object):
@classmethod
def setupClass(cls):
# TODO: Test HAC method
X = np.random.randn(100,3)
b = np.ones((3,1))
e = np.random.randn(100,1)
y = np.dot(X,b) + e
# Cases?
# Homoskedastic
# HC0
cls.res1_full = OLS(y,X).fit()
cls.res1_restricted = OLS(y,X[:,0]).fit()
cls.res2_full = cls.res1_full.get_robustcov_results('HC0')
cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0')
cls.X = X
cls.Y = y
def test_LM_homoskedastic(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
S = np.dot(resid,resid) / n * np.dot(X.T,X) / n
Sinv = np.linalg.inv(S)
s = np.mean(X * resid[:,None], 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_nodemean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_demean(self):
resid = self.res1_restricted.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
scores_demean = scores - scores.mean(0)
S = np.dot(scores_demean.T,scores_demean) / n
Sinv = np.linalg.inv(S)
s = np.mean(scores, 0)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_heteroskedastic_LRversion(self):
resid = self.res1_restricted.wresid
resid_full = self.res1_full.wresid
n = resid.shape[0]
X = self.X
scores = X * resid[:,None]
s = np.mean(scores, 0)
scores = X * resid_full[:,None]
S = np.dot(scores.T,scores) / n
Sinv = np.linalg.inv(S)
LMstat = n * np.dot(np.dot(s,Sinv),s.T)
LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True)
LMstat2 = LMstat_OLS[0]
assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
def test_LM_nonnested(self):
assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full)
class TestOLS_GLS_WLS_equivalence(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
w = np.ones(n)
cls.results = []
cls.results.append(OLS(y, X).fit())
cls.results.append(WLS(y, X, w).fit())
cls.results.append(GLS(y, X, 100*w).fit())
cls.results.append(GLS(y, X, np.diag(0.1*w)).fit())
def test_ll(self):
llf = np.array([r.llf for r in self.results])
llf_1 = np.ones_like(llf) * self.results[0].llf
assert_almost_equal(llf, llf_1, DECIMAL_7)
ic = np.array([r.aic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].aic
assert_almost_equal(ic, ic_1, DECIMAL_7)
ic = np.array([r.bic for r in self.results])
ic_1 = np.ones_like(ic) * self.results[0].bic
assert_almost_equal(ic, ic_1, DECIMAL_7)
def test_params(self):
params = np.array([r.params for r in self.results])
params_1 = np.array([self.results[0].params] * len(self.results))
assert_allclose(params, params_1)
def test_ss(self):
bse = np.array([r.bse for r in self.results])
bse_1 = np.array([self.results[0].bse] * len(self.results))
assert_allclose(bse, bse_1)
def test_rsquared(self):
rsquared = np.array([r.rsquared for r in self.results])
rsquared_1 = np.array([self.results[0].rsquared] * len(self.results))
assert_almost_equal(rsquared, rsquared_1, DECIMAL_7)
class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence):
# reuse test methods
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
y = data.endog
X = data.exog
n = y.shape[0]
np.random.seed(5)
w = np.random.uniform(0.5, 1, n)
w_inv = 1. / w
cls.results = []
cls.results.append(WLS(y, X, w).fit())
cls.results.append(WLS(y, X, 0.01 * w).fit())
cls.results.append(GLS(y, X, 100 * w_inv).fit())
cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit())
def test_rsquared(self):
# TODO: WLS rsquared is ok, GLS might have wrong centered_tss
# We only check that WLS and GLS rsquared is invariant to scaling
# WLS and GLS have different rsquared
assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared,
DECIMAL_7)
assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared,
DECIMAL_7)
class TestNonFit(object):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.endog = data.endog
cls.exog = data.exog
cls.ols_model = OLS(data.endog, data.exog)
def test_df_resid(self):
df_resid = self.endog.shape[0] - self.exog.shape[1]
assert_equal(self.ols_model.df_resid, long(9))
class TestWLS_CornerCases(object):
@classmethod
def setupClass(cls):
cls.exog = np.ones((1,))
cls.endog = np.ones((1,))
weights = 1
cls.wls_res = WLS(cls.endog, cls.exog, weights=weights).fit()
def test_wrong_size_weights(self):
weights = np.ones((10,10))
assert_raises(ValueError, WLS, self.endog, self.exog, weights=weights)
class TestWLSExogWeights(CheckRegressionResults):
#Test WLS with Greene's credit card data
#reg avgexp age income incomesq ownrent [aw=1/incomesq]
def __init__(self):
from .results.results_regression import CCardWLS
from statsmodels.datasets.ccard import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=False)
nobs = 72.
weights = 1/dta.exog[:,2]
# for comparison with stata analytic weights
scaled_weights = ((weights * nobs)/weights.sum())
self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit()
self.res2 = CCardWLS()
self.res2.wresid = scaled_weights ** .5 * self.res2.resid
# correction because we use different definition for loglike/llf
corr_ic = 2 * (self.res1.llf - self.res2.llf)
self.res2.aic -= corr_ic
self.res2.bic -= corr_ic
self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights))
def test_wls_example():
#example from the docstring, there was a note about a bug, should
#be fixed now
Y = [1,3,4,5,2,3,4]
X = lrange(1,8)
X = add_constant(X, prepend=False)
wls_model = WLS(Y,X, weights=lrange(1,8)).fit()
#taken from R lm.summary
assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)
def test_wls_tss():
y = np.array([22, 22, 22, 23, 23, 23])
X = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]]
ols_mod = OLS(y, add_constant(X, prepend=False)).fit()
yw = np.array([22, 22, 23.])
Xw = [[1,0],[1,1],[0,1]]
w = np.array([2, 1, 3.])
wls_mod = WLS(yw, add_constant(Xw, prepend=False), weights=w).fit()
assert_equal(ols_mod.centered_tss, wls_mod.centered_tss)
class TestWLSScalarVsArray(CheckRegressionResults):
@classmethod
def setupClass(cls):
from statsmodels.datasets.longley import load
dta = load()
dta.exog = add_constant(dta.exog, prepend=True)
wls_scalar = WLS(dta.endog, dta.exog, weights=1./3).fit()
weights = [1/3.] * len(dta.endog)
wls_array = WLS(dta.endog, dta.exog, weights=weights).fit()
cls.res1 = wls_scalar
cls.res2 = wls_array
#class TestWLS_GLS(CheckRegressionResults):
# @classmethod
# def setupClass(cls):
# from statsmodels.datasets.ccard import load
# data = load()
# cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit()
# cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit()
#
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2(), DECIMAL_4)
def test_wls_missing():
from statsmodels.datasets.ccard import load
data = load()
endog = data.endog
endog[[10, 25]] = np.nan
mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop')
assert_equal(mod.endog.shape[0], 70)
assert_equal(mod.exog.shape[0], 70)
assert_equal(mod.weights.shape[0], 70)
class TestWLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = OLS(data.endog, data.exog).fit()
cls.res2 = WLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_OLS(CheckRegressionResults):
@classmethod
def setupClass(cls):
data = longley.load()
data.exog = add_constant(data.exog, prepend=False)
cls.res1 = GLS(data.endog, data.exog).fit()
cls.res2 = OLS(data.endog, data.exog).fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
#TODO: test AR
# why the two-stage in AR?
#class test_ar(object):
# from statsmodels.datasets.sunspots import load
# data = load()
# model = AR(data.endog, rho=4).fit()
# R_res = RModel(data.endog, aic="FALSE", order_max=4)
# def test_params(self):
# assert_almost_equal(self.model.rho,
# pass
# def test_order(self):
# In R this can be defined or chosen by minimizing the AIC if aic=True
# pass
class TestYuleWalker(object):
@classmethod
def setupClass(cls):
from statsmodels.datasets.sunspots import load
data = load()
cls.rho, cls.sigma = yule_walker(data.endog, order=4,
method="mle")
cls.R_params = [1.2831003105694765, -0.45240924374091945,
-0.20770298557575195, 0.047943648089542337]
def test_params(self):
assert_almost_equal(self.rho, self.R_params, DECIMAL_4)
class TestDataDimensions(CheckRegressionResults):
@classmethod
def setupClass(cls):
np.random.seed(54321)
cls.endog_n_ = np.random.uniform(0,20,size=30)
cls.endog_n_one = cls.endog_n_[:,None]
cls.exog_n_ = np.random.uniform(0,20,size=30)
cls.exog_n_one = cls.exog_n_[:,None]
cls.degen_exog = cls.exog_n_one[:-1]
cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod1.df_model += 1
cls.res1 = cls.mod1.fit()
# Note that these are created for every subclass..
# A little extra overhead probably
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def check_confidenceintervals(self, conf1, conf2):
assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_large_data(TestDataDimensions):
@classmethod
def setupClass(cls):
nobs = 1000
y = np.random.randn(nobs,1)
X = np.random.randn(nobs,20)
sigma = np.ones_like(y)
cls.gls_res = GLS(y, X, sigma=sigma).fit()
cls.gls_res_scalar = GLS(y, X, sigma=1).fit()
cls.gls_res_none= GLS(y, X).fit()
cls.ols_res = OLS(y, X).fit()
def test_large_equal_params(self):
assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7)
def test_large_equal_loglike(self):
assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7)
def test_large_equal_params_none(self):
assert_almost_equal(self.gls_res.params, self.gls_res_none.params,
DECIMAL_7)
class TestNxNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxOneNx(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxOneNx, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
class TestNxNxOne(TestDataDimensions):
@classmethod
def setupClass(cls):
super(TestNxNxOne, cls).setupClass()
cls.mod2 = OLS(cls.endog_n_, cls.exog_n_one)
cls.mod2.df_model += 1
cls.res2 = cls.mod2.fit()
def test_bad_size():
np.random.seed(54321)
data = np.random.uniform(0,20,31)
assert_raises(ValueError, OLS, data, data[1:])
def test_const_indicator():
np.random.seed(12345)
X = np.random.randint(0, 3, size=30)
X = categorical(X, drop=True)
y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30)
modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit()
mod = OLS(y, X, hasconst=True).fit()
assert_almost_equal(modc.rsquared, mod.rsquared, 12)
def test_706():
# make sure one regressor pandas Series gets passed to DataFrame
# for conf_int.
y = pandas.Series(np.random.randn(10))
x = pandas.Series(np.ones(10))
res = OLS(y,x).fit()
conf_int = res.conf_int()
np.testing.assert_equal(conf_int.shape, (1, 2))
np.testing.assert_(isinstance(conf_int, pandas.DataFrame))
def test_summary():
# test 734
import re
dta = longley.load_pandas()
X = dta.exog
X["constant"] = 1
y = dta.endog
with warnings.catch_warnings(record=True):
res = OLS(y, X).fit()
table = res.summary().as_latex()
# replace the date and time
table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&",
" Sun, 07 Apr 2013 &", table)
table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&",
" 13:46:07 &", table)
expected = """\\begin{center}
\\begin{tabular}{lclc}
\\toprule
\\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\
\\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\
\\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\
\\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\
\\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\
\\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\
\\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\
\\textbf{Df Model:} & 6 & \\textbf{ } & \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lcccccc}
& \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[0.025} & \\textbf{0.975]} \\\\
\\midrule
\\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 & 207.153 \\\\
\\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 & 0.040 \\\\
\\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 & -0.915 \\\\
\\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 & -0.549 \\\\
\\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 & 0.460 \\\\
\\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 & 2859.515 \\\\
\\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 & -1.47e+06 \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lclc}
\\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\
\\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\
\\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\
\\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\
\\bottomrule
\\end{tabular}
%\\caption{OLS Regression Results}
\\end{center}"""
assert_equal(table, expected)
class TestRegularizedFit(object):
# Make sure there are no issues when there are no selected
# variables.
def test_empty_model(self):
np.random.seed(742)
n = 100
endog = np.random.normal(size=n)
exog = np.random.normal(size=(n, 3))
model = OLS(endog, exog)
result = model.fit_regularized(alpha=1000)
assert_equal(result.params, 0.)
assert_equal(result.bse, 0.)
def test_regularized(self):
import os
from . import glmnet_r_results
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"),
delimiter=",")
tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")]
for test in tests:
vec = getattr(glmnet_r_results, test)
n = vec[0]
p = vec[1]
L1_wt = float(vec[2])
lam = float(vec[3])
params = vec[4:].astype(np.float64)
endog = data[0:int(n), 0]
exog = data[0:int(n), 1:(int(p)+1)]
endog = endog - endog.mean()
endog /= endog.std(ddof=1)
exog = exog - exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = OLS(endog, exog)
rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam)
assert_almost_equal(rslt.params, params, decimal=3)
# Smoke test for summary
smry = rslt.summary()
def test_formula_missing_cat():
# gh-805
import statsmodels.api as sm
from statsmodels.formula.api import ols
from patsy import PatsyError
dta = sm.datasets.grunfeld.load_pandas().data
dta.ix[0, 'firm'] = np.nan
mod = ols(formula='value ~ invest + capital + firm + year',
data=dta.dropna())
res = mod.fit()
mod2 = ols(formula='value ~ invest + capital + firm + year',
data=dta)
res2 = mod2.fit()
assert_almost_equal(res.params.values, res2.params.values)
assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year',
data=dta, missing='raise')
def test_missing_formula_predict():
# see 2171
nsample = 30
data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)})
null = pandas.DataFrame({'x': np.array([np.nan])})
data = pandas.concat([data, null])
beta = np.array([1, 0.1])
e = np.random.normal(size=nsample+1)
data['y'] = beta[0] + beta[1] * data['x'] + e
model = OLS.from_formula('y ~ x', data=data)
fit = model.fit()
pred = fit.predict(exog=data[:-1])
def test_fvalue_implicit_constant():
nobs = 100
np.random.seed(2)
x = np.random.randn(nobs, 1)
x = ((x > 0) == [True, False]).astype(int)
y = x.sum(1) + np.random.randn(nobs)
w = 1 + 0.25 * np.random.rand(nobs)
from statsmodels.regression.linear_model import OLS, WLS
res = OLS(y, x).fit(cov_type='HC1')
assert_(np.isnan(res.fvalue))
assert_(np.isnan(res.f_pvalue))
res.summary()
res = WLS(y, x).fit(cov_type='HC1')
assert_(np.isnan(res.fvalue))
assert_(np.isnan(res.f_pvalue))
res.summary()
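# A minimal sketch (not part of the original test suite) of calling
# fit_regularized directly; the alpha and L1_wt values below are
# illustrative only.
def _example_fit_regularized_usage():
    np.random.seed(0)
    exog = np.random.normal(size=(50, 4))
    beta = np.array([1.0, 0.0, -2.0, 0.0])
    endog = np.dot(exog, beta) + np.random.normal(size=50)
    # alpha scales the penalty strength; L1_wt mixes the L1 and L2 terms.
    result = OLS(endog, exog).fit_regularized(alpha=0.1, L1_wt=0.9)
    return result.params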
if __name__=="__main__":
import nose
# run_module_suite()
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
# nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
| bsd-3-clause |
maheshakya/scikit-learn | sklearn/datasets/svmlight_format.py | 6 | 14944 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f: {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features: int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
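# A small self-contained sketch: parse two in-memory sources that share one
# feature space, so X_train and X_test come out with the same number of
# columns. The literal byte strings below are illustrative only.
def _example_load_svmlight_files():
    train = io.BytesIO(b"1 1:0.5 3:1.5\n-1 2:2.0\n")
    test = io.BytesIO(b"-1 1:1.0\n")
    X_train, y_train, X_test, y_test = load_svmlight_files(
        [train, test], zero_based=False)
    return X_train.shape, X_test.shape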
def _dump_svmlight(X, y, f, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
line_pattern = u("%d")
else:
line_pattern = u("%.16g")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if query_id is not None:
feat = (y[i], query_id[i], s)
else:
feat = (y[i], s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None):
"""Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, one_based, comment, query_id)
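# A minimal round-trip sketch: dump a small dense array to an in-memory
# buffer and load it back. The data values are illustrative only.
def _example_svmlight_roundtrip():
    X = np.array([[0.0, 2.5], [1.0, 0.0]])
    y = np.array([0, 1])
    buf = io.BytesIO()
    dump_svmlight_file(X, y, buf, zero_based=True)
    buf.seek(0)
    X_loaded, y_loaded = load_svmlight_file(buf, n_features=X.shape[1],
                                            zero_based=True)
    return X_loaded.toarray(), y_loaded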
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/backends/qt_compat.py | 10 | 4816 | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major Qt version. Note that
# ETS requires the version 2 API of PyQt4, which is not the platform
# default for Python 2.x.
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
else:
QT_RC_MAJOR_VERSION = 4
QT_API = None
if (QT_API_ENV is not None):
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
else:
QT_API = rcParams['backend.qt4']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
else: # PyQt5 API
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
else: # try importing pyside
from PySide import QtCore, QtGui, __version__, __version_info__
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
    Here I've opted to simply copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
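# A brief sketch of how backend code typically consumes this shim: import the
# selected binding from qt_compat so the same widget code runs against PyQt4,
# PyQt5 or PySide alike. The widget built here is illustrative only.
def _example_qt_compat_usage():
    app = QtWidgets.QApplication.instance() or QtWidgets.QApplication([])
    button = QtWidgets.QPushButton("Close")
    button.clicked.connect(app.quit)
    return button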
| mit |
fzalkow/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
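# A short usage sketch on toy data (the alpha and n_resampling values are
# illustrative only): fit the randomized Lasso and inspect stability scores.
def _example_randomized_lasso():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 10)
    coef = np.zeros(10)
    coef[:3] = [2.0, -1.5, 1.0]
    y = np.dot(X, coef) + 0.1 * rng.randn(60)
    rlasso = RandomizedLasso(alpha=0.025, n_resampling=50, random_state=0)
    rlasso.fit(X, y)
    return rlasso.scores_, rlasso.get_support(indices=True)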
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
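# A short usage sketch on toy binary-classification data (parameter values
# are illustrative only).
def _example_randomized_logistic_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(80, 8)
    y = (X[:, 0] - 2 * X[:, 1] + 0.5 * rng.randn(80) > 0).astype(int)
    rlog = RandomizedLogisticRegression(C=0.5, n_resampling=50,
                                        random_state=0)
    rlog.fit(X, y)
    return rlog.scores_, rlog.get_support(indices=True)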
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
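# A short usage sketch: compute the stability path for a toy regression
# problem (parameter values are illustrative only). scores_path has shape
# (n_features, len(alpha_grid)) and can be plotted against alpha_grid.
def _example_lasso_stability_path():
    rng = np.random.RandomState(42)
    X = rng.randn(50, 12)
    coef = np.zeros(12)
    coef[:4] = 1.0
    y = np.dot(X, coef) + 0.05 * rng.randn(50)
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
                                                   n_resampling=50)
    return alpha_grid, scores_path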
| bsd-3-clause |
tdhopper/scikit-learn | sklearn/linear_model/passive_aggressive.py | 97 | 10879 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept : bool, default=False
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
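# A minimal usage sketch on toy data (parameter values are illustrative
# only); numpy is imported locally because this module does not import it.
def _example_passive_aggressive_classifier():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = PassiveAggressiveClassifier(C=1.0, n_iter=5, random_state=0)
    clf.fit(X, y)
    return clf.score(X, y)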
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
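# A minimal usage sketch on toy data (parameter values are illustrative
# only); numpy is imported locally because this module does not import it.
def _example_passive_aggressive_regressor():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    y = np.dot(X, np.array([1.0, -2.0, 0.5])) + 0.1 * rng.randn(100)
    reg = PassiveAggressiveRegressor(C=1.0, epsilon=0.01, n_iter=10,
                                     random_state=0)
    reg.fit(X, y)
    return reg.predict(X[:5])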
| bsd-3-clause |
jforbess/pvlib-python | pvlib/pvsystem.py | 1 | 42489 | """
The ``pvsystem`` module contains functions for modeling the output and
performance of PV modules and inverters.
"""
from __future__ import division
import logging
pvl_logger = logging.getLogger('pvlib')
import io
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import pandas as pd
from pvlib import tools
def systemdef(meta, surface_tilt, surface_azimuth, albedo, series_modules,
parallel_modules):
'''
Generates a dict of system parameters used throughout a simulation.
Parameters
----------
meta : dict
meta dict either generated from a TMY file using readtmy2 or readtmy3,
or a dict containing at least the following fields:
=============== ====== ====================
meta field format description
=============== ====== ====================
meta.altitude Float site elevation
meta.latitude Float site latitude
meta.longitude Float site longitude
meta.Name String site name
meta.State String state
meta.TZ Float timezone
=============== ====== ====================
surface_tilt : float or Series
Surface tilt angles in decimal degrees.
The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : float or Series
Surface azimuth angles in decimal degrees.
The azimuth convention is defined
as degrees east of north
(North=0, South=180, East=90, West=270).
albedo : float or Series
Ground reflectance, typically 0.1-0.4 for
surfaces on Earth (land), may increase over snow, ice, etc. May also
be known as the reflection coefficient. Must be >=0 and <=1.
series_modules : int
Number of modules connected in series in a string.
parallel_modules : int
Number of strings connected in parallel.
Returns
-------
Result : dict
A dict with the following fields.
* 'surface_tilt'
* 'surface_azimuth'
* 'albedo'
* 'series_modules'
* 'parallel_modules'
* 'latitude'
* 'longitude'
* 'tz'
* 'name'
* 'altitude'
See also
--------
pvlib.tmy.readtmy3
pvlib.tmy.readtmy2
'''
try:
name = meta['Name']
except KeyError:
name = meta['City']
system = {'surface_tilt': surface_tilt,
'surface_azimuth': surface_azimuth,
'albedo': albedo,
'series_modules': series_modules,
'parallel_modules': parallel_modules,
'latitude': meta['latitude'],
'longitude': meta['longitude'],
'tz': meta['TZ'],
'name': name,
'altitude': meta['altitude']}
return system
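# A brief usage sketch with a hand-built meta dict (values illustrative);
# in practice meta usually comes from tmy.readtmy2 or tmy.readtmy3.
def _example_systemdef():
    meta = {'altitude': 1100.0, 'latitude': 35.0, 'longitude': -106.0,
            'Name': 'Example Site', 'State': 'NM', 'TZ': -7.0}
    return systemdef(meta, surface_tilt=30, surface_azimuth=180,
                     albedo=0.2, series_modules=10, parallel_modules=2)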
def ashraeiam(b, aoi):
'''
Determine the incidence angle modifier using the ASHRAE transmission model.
ashraeiam calculates the incidence angle modifier as developed in
[1], and adopted by ASHRAE (American Society of Heating, Refrigeration,
and Air Conditioning Engineers) [2]. The model has been used by model
programs such as PVSyst [3].
Note: For incident angles near 90 degrees, this model has a
discontinuity which has been addressed in this function.
Parameters
----------
b : float
A parameter to adjust the modifier as a function of angle of
incidence. Typical values are on the order of 0.05 [3].
aoi : Series
The angle of incidence between the module normal vector and the
sun-beam vector in degrees.
Returns
-------
IAM : Series
The incident angle modifier calculated as 1-b*(sec(aoi)-1) as
described in [2,3].
Returns nan for all abs(aoi) >= 90 and for all IAM values
that would be less than 0.
References
----------
    [1] Souka A.F., Safwat H.H., "Determination of the optimum orientations
    for the double exposure flat-plate collector and its reflections".
    Solar Energy vol. 10, pp 170-174. 1966.
[2] ASHRAE standard 93-77
[3] PVsyst Contextual Help.
http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
September 10, 2012
See Also
--------
irradiance.aoi
physicaliam
'''
IAM = 1 - b*((1/np.cos(np.radians(aoi)) - 1))
IAM[abs(aoi) >= 90] = np.nan
IAM[IAM < 0] = np.nan
return IAM
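# A brief usage sketch: evaluate the ASHRAE incidence angle modifier over a
# range of angles, using the typical b=0.05 mentioned above.
def _example_ashraeiam():
    aoi = pd.Series(np.linspace(0, 85, 18))
    return ashraeiam(0.05, aoi)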
def physicaliam(K, L, n, aoi):
'''
Determine the incidence angle modifier using refractive
index, glazing thickness, and extinction coefficient
physicaliam calculates the incidence angle modifier as described in
De Soto et al. "Improvement and validation of a model for photovoltaic
array performance", section 3. The calculation is based upon a physical
model of absorbtion and transmission through a cover. Required
information includes, incident angle, cover extinction coefficient,
cover thickness
Note: The authors of this function believe that eqn. 14 in [1] is
incorrect. This function uses the following equation in its place:
theta_r = arcsin(1/n * sin(theta))
Parameters
----------
K : float
The glazing extinction coefficient in units of 1/meters. Reference
[1] indicates that a value of 4 is reasonable for "water white"
glass. K must be a numeric scalar or vector with all values >=0. If K
is a vector, it must be the same size as all other input vectors.
L : float
The glazing thickness in units of meters. Reference [1] indicates
that 0.002 meters (2 mm) is reasonable for most glass-covered
PV panels. L must be a numeric scalar or vector with all values >=0.
If L is a vector, it must be the same size as all other input vectors.
n : float
The effective index of refraction (unitless). Reference [1]
indicates that a value of 1.526 is acceptable for glass. n must be a
numeric scalar or vector with all values >=0. If n is a vector, it
must be the same size as all other input vectors.
aoi : Series
The angle of incidence between the module normal vector and the
sun-beam vector in degrees.
Returns
-------
IAM : float or Series
The incident angle modifier as specified in eqns. 14-16 of [1].
IAM is a column vector with the same number of elements as the
largest input vector.
    aoi must be a numeric scalar or vector.
    For any values where abs(aoi) >= 90, IAM is set to NaN. For any
    values of aoi where -90 < aoi < 0, the result is equivalent to
    evaluating abs(aoi).
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] Duffie, John A. & Beckman, William A.. (2006). Solar Engineering
of Thermal Processes, third edition. [Books24x7 version] Available
from http://common.books24x7.com/toc.aspx?bookid=17160.
See Also
--------
getaoi
ephemeris
spa
ashraeiam
'''
thetar_deg = tools.asind(1.0 / n*(tools.sind(aoi)))
tau = ( np.exp(- 1.0 * (K*L / tools.cosd(thetar_deg))) *
((1 - 0.5*((((tools.sind(thetar_deg - aoi)) ** 2) /
((tools.sind(thetar_deg + aoi)) ** 2) +
((tools.tand(thetar_deg - aoi)) ** 2) /
((tools.tand(thetar_deg + aoi)) ** 2))))) )
zeroang = 1e-06
thetar_deg0 = tools.asind(1.0 / n*(tools.sind(zeroang)))
tau0 = ( np.exp(- 1.0 * (K*L / tools.cosd(thetar_deg0))) *
((1 - 0.5*((((tools.sind(thetar_deg0 - zeroang)) ** 2) /
((tools.sind(thetar_deg0 + zeroang)) ** 2) +
((tools.tand(thetar_deg0 - zeroang)) ** 2) /
((tools.tand(thetar_deg0 + zeroang)) ** 2))))) )
IAM = tau / tau0
IAM[abs(aoi) >= 90] = np.nan
IAM[IAM < 0] = np.nan
return IAM
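# A brief usage sketch using the glass values quoted in the docstring
# (K=4 1/m, L=0.002 m, n=1.526); angles start above zero to avoid the
# 0/0 term at exactly normal incidence.
def _example_physicaliam():
    aoi = pd.Series(np.linspace(1, 85, 15))
    return physicaliam(4, 0.002, 1.526, aoi)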
def calcparams_desoto(poa_global, temp_cell, alpha_isc, module_parameters,
EgRef, dEgdT, M=1, irrad_ref=1000, temp_ref=25):
'''
Applies the temperature and irradiance corrections to
inputs for singlediode.
Applies the temperature and irradiance corrections to the IL, I0,
Rs, Rsh, and a parameters at reference conditions (IL_ref, I0_ref,
etc.) according to the De Soto et. al description given in [1]. The
results of this correction procedure may be used in a single diode
model to determine IV curves at irradiance = S, cell temperature =
Tcell.
Parameters
----------
poa_global : float or Series
The irradiance (in W/m^2) absorbed by the module.
temp_cell : float or Series
The average cell temperature of cells within a module in C.
alpha_isc : float
The short-circuit current temperature coefficient of the
module in units of 1/C.
module_parameters : dict
Parameters describing PV module performance at reference
conditions according to DeSoto's paper. Parameters may be
generated or found by lookup. For ease of use,
retrieve_sam can automatically generate a dict based on the
most recent SAM CEC module
database. The module_parameters dict must contain the
following 5 fields:
* A_ref - modified diode ideality factor parameter at
reference conditions (units of eV), a_ref can be calculated
from the usual diode ideality factor (n),
number of cells in series (Ns),
and cell temperature (Tcell) per equation (2) in [1].
* I_l_ref - Light-generated current (or photocurrent)
in amperes at reference conditions. This value is referred to
as Iph in some literature.
* I_o_ref - diode reverse saturation current in amperes,
under reference conditions.
* R_sh_ref - shunt resistance under reference conditions (ohms).
* R_s - series resistance under reference conditions (ohms).
EgRef : float
The energy bandgap at reference temperature (in eV).
1.121 eV for silicon. EgRef must be >0.
dEgdT : float
The temperature dependence of the energy bandgap at SRC (in 1/C).
May be either a scalar value (e.g. -0.0002677 as in [1]) or a
DataFrame of dEgdT values corresponding to each input condition (this
may be useful if dEgdT is a function of temperature).
M : float or Series (optional, default=1)
An optional airmass modifier, if omitted, M is given a value of 1,
which assumes absolute (pressure corrected) airmass = 1.5. In this
code, M is equal to M/Mref as described in [1] (i.e. Mref is assumed
to be 1). Source [1] suggests that an appropriate value for M
        as a function of absolute airmass (AMa) may be:
>>> M = np.polyval([-0.000126, 0.002816, -0.024459, 0.086257, 0.918093],
... AMa) # doctest: +SKIP
M may be a Series.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : float or Series
Light-generated current in amperes at irradiance=S and
cell temperature=Tcell.
saturation_current : float or Series
        Diode saturation current in amperes at irradiance
S and cell temperature Tcell.
resistance_series : float
Series resistance in ohms at irradiance S and cell temperature Tcell.
resistance_shunt : float or Series
Shunt resistance in ohms at irradiance S and cell temperature Tcell.
nNsVth : float or Series
Modified diode ideality factor at irradiance S and cell temperature
Tcell. Note that in source [1] nNsVth = a (equation 2). nNsVth is the
product of the usual diode ideality factor (n), the number of
series-connected cells in the module (Ns), and the thermal voltage
of a cell in the module (Vth) at a cell temperature of Tcell.
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] System Advisor Model web page. https://sam.nrel.gov.
[3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
[4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
sapm
sapm_celltemp
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor Model),
it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described in
[3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M) is
provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon the
various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generating reference
parameters and modifying the reference parameters (for irradiance,
temperature, and airmass) per DeSoto's equations.
Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.918093],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4]
'''
M = np.maximum(M, 0)  # element-wise clamp of the airmass modifier at zero
a_ref = module_parameters['A_ref']
IL_ref = module_parameters['I_l_ref']
I0_ref = module_parameters['I_o_ref']
Rsh_ref = module_parameters['R_sh_ref']
Rs_ref = module_parameters['R_s']
k = 8.617332478e-05
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))
nNsVth = a_ref * (Tcell_K / Tref_K)
IL = (poa_global/irrad_ref) * M * (IL_ref + alpha_isc * (Tcell_K - Tref_K))
I0 = ( I0_ref * ((Tcell_K / Tref_K) ** 3) *
(np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))) )
Rsh = Rsh_ref * (irrad_ref / poa_global)
Rs = Rs_ref
return IL, I0, Rs, Rsh, nNsVth
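# Usage sketch for calcparams_desoto (a minimal, hedged example): the numbers
# below are illustrative placeholders, not real datasheet values, and the
# keyword names are assumed from the variables used in the body above. The
# five outputs feed directly into singlediode further down.
# >>> params = {'A_ref': 1.6, 'I_l_ref': 5.1, 'I_o_ref': 8e-10,
# ...           'R_sh_ref': 400.0, 'R_s': 0.5}
# >>> IL, I0, Rs, Rsh, nNsVth = calcparams_desoto(
# ...     poa_global=800.0, temp_cell=45.0, alpha_isc=0.003,
# ...     module_parameters=params, EgRef=1.121, dEgdT=-0.0002677)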
def retrieve_sam(name=None, samfile=None):
'''
Retrieve the latest module and inverter info from the SAM website.
This function will retrieve either:
* CEC module database
* Sandia Module database
* Sandia Inverter database
and return it as a pandas dataframe.
Parameters
----------
name : String
Name can be one of:
* 'CECMod' - returns the CEC module database
* 'SandiaInverter' - returns the Sandia Inverter database
* 'SandiaMod' - returns the Sandia Module database
samfile : String
Absolute path to the location of local versions of the SAM file.
If file is specified, the latest versions of the SAM database will
not be downloaded. The selected file must be in .csv format.
If set to 'select', a file-selection dialog will open, allowing the user
to navigate to the desired file.
Returns
-------
A DataFrame containing all the elements of the desired database.
Each column represents a module or inverter, and a specific dataset
can be retrieved by the command shown in the example below.
Examples
--------
>>> from pvlib import pvsystem
>>> invdb = pvsystem.retrieve_sam(name='SandiaInverter')
>>> inverter = invdb.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
>>> inverter
Vac 277.000000
Paco 6000.000000
Pdco 6165.670000
Vdco 361.123000
Pso 36.792300
C0 -0.000002
C1 -0.000047
C2 -0.001861
C3 0.000721
Pnt 0.070000
Vdcmax 600.000000
Idcmax 32.000000
Mppt_low 200.000000
Mppt_high 500.000000
Name: AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_, dtype: float64
'''
if name is not None:
name = name.lower()
if name == 'cecmod':
url = 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-cec-modules-2014-1-14.csv'
elif name == 'sandiamod':
url = 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-sandia-modules-2014-1-14.csv'
elif name == 'sandiainverter':
url = 'https://sam.nrel.gov/sites/sam.nrel.gov/files/sam-library-sandia-inverters-2014-1-14.csv'
elif samfile is None:
raise ValueError('invalid name {}'.format(name))
if name is None and samfile is None:
raise ValueError('must supply name or samfile')
if samfile is None:
pvl_logger.info('retrieving {} from {}'.format(name, url))
response = urlopen(url)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
elif samfile == 'select':
import Tkinter
from tkFileDialog import askopenfilename
Tkinter.Tk().withdraw()
csvdata = askopenfilename()
else:
csvdata = samfile
return _parse_raw_sam_df(csvdata)
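# Usage sketch (hedged): loading a locally saved copy of one of the SAM
# databases instead of downloading it; the path below is hypothetical.
# >>> cec_modules = retrieve_sam(samfile='sam-library-cec-modules.csv')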
def _parse_raw_sam_df(csvdata):
df = pd.read_csv(csvdata, index_col=0)
parsedindex = []
for index in df.index:
parsedindex.append(index.replace(' ', '_').replace('-', '_')
.replace('.', '_').replace('(', '_')
.replace(')', '_').replace('[', '_')
.replace(']', '_').replace(':', '_'))
df.index = parsedindex
df = df.transpose()
return df
def sapm(module, poa_direct, poa_diffuse, temp_cell, airmass_absolute, aoi):
'''
The Sandia PV Array Performance Model (SAPM) generates 5 points on a PV
module's I-V curve (Voc, Isc, Ix, Ixx, Vmp/Imp) according to
SAND2004-3535. Assumes a reference cell temperature of 25 C.
Parameters
----------
module : Series or dict
A DataFrame defining the SAPM performance parameters.
poa_direct : Series
The direct irradiance incident upon the module (W/m^2).
poa_diffuse : Series
The diffuse irradiance incident on module.
temp_cell : Series
The cell temperature (degrees C).
airmass_absolute : Series
Absolute airmass.
aoi : Series
Angle of incidence (degrees).
Returns
-------
A DataFrame with the columns:
* i_sc : Short-circuit current (A)
* I_mp : Current at the maximum-power point (A)
* v_oc : Open-circuit voltage (V)
* v_mp : Voltage at maximum-power point (V)
* p_mp : Power at maximum-power point (W)
* i_x : Current at module V = 0.5Voc, defines 4th point on I-V
curve for modeling curve shape
* i_xx : Current at module V = 0.5(Voc+Vmp), defines 5th point on
I-V curve for modeling curve shape
* effective_irradiance : Effective irradiance
Notes
-----
The coefficients from SAPM which are required in ``module`` are:
======== ===============================================================
Key Description
======== ===============================================================
A0-A4 The airmass coefficients used in calculating
effective irradiance
B0-B5 The angle of incidence coefficients used in calculating
effective irradiance
C0-C7 The empirically determined coefficients relating
Imp, Vmp, Ix, and Ixx to effective irradiance
Isco Short circuit current at reference condition (amps)
Impo Maximum power current at reference condition (amps)
Aisc Short circuit current temperature coefficient at
reference condition (1/C)
Aimp Maximum power current temperature coefficient at
reference condition (1/C)
Bvoc Open circuit voltage temperature coefficient at
reference condition (V/C)
Mbvoc Coefficient providing the irradiance dependence for the BetaVoc
temperature coefficient at reference irradiance (V/C)
Bvmpo Maximum power voltage temperature coefficient at
reference condition
Mbvmp Coefficient providing the irradiance dependence for the
BetaVmp temperature coefficient at reference irradiance (V/C)
N Empirically determined "diode factor" (dimensionless)
#Series Number of cells in series in a module's cell string(s)
IXO Ix at reference conditions
IXXO Ixx at reference conditions
FD Fraction of diffuse irradiance used by module
======== ===============================================================
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance Model",
SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.
See Also
--------
retrieve_sam
sapm_celltemp
'''
T0 = 25
q = 1.60218e-19 # Elementary charge in units of coulombs
kb = 1.38066e-23 # Boltzmann's constant in units of J/K
E0 = 1000
am_coeff = [module['A4'], module['A3'], module['A2'], module['A1'],
module['A0']]
aoi_coeff = [module['B5'], module['B4'], module['B3'], module['B2'],
module['B1'], module['B0']]
F1 = np.polyval(am_coeff, airmass_absolute)
F2 = np.polyval(aoi_coeff, aoi)
# Ee is the "effective irradiance"
Ee = F1 * ( (poa_direct*F2 + module['FD']*poa_diffuse) / E0 )
Ee.fillna(0, inplace=True)
Ee = Ee.clip_lower(0)
Bvmpo = module['Bvmpo'] + module['Mbvmp']*(1 - Ee)
Bvoco = module['Bvoco'] + module['Mbvoc']*(1 - Ee)
delta = module['N'] * kb * (temp_cell + 273.15) / q
dfout = pd.DataFrame(index=Ee.index)
dfout['i_sc'] = (
module['Isco'] * Ee * (1 + module['Aisc']*(temp_cell - T0)) )
dfout['i_mp'] = ( module['Impo'] *
(module['C0']*Ee + module['C1']*(Ee**2)) *
(1 + module['Aimp']*(temp_cell - T0)) )
dfout['v_oc'] = (( module['Voco'] +
module['#Series']*delta*np.log(Ee) + Bvoco*(temp_cell - T0) )
.clip_lower(0))
dfout['v_mp'] = ( module['Vmpo'] +
module['C2']*module['#Series']*delta*np.log(Ee) +
module['C3']*module['#Series']*((delta*np.log(Ee)) ** 2) +
Bvmpo*(temp_cell - T0) ).clip_lower(0)
dfout['p_mp'] = dfout['i_mp'] * dfout['v_mp']
dfout['i_x'] = ( module['IXO'] *
(module['C4']*Ee + module['C5']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - T0)) )
# the Ixx calculation in King 2004 has a typo (mixes up Aisc and Aimp)
dfout['i_xx'] = ( module['IXXO'] *
(module['C6']*Ee + module['C7']*(Ee**2)) *
(1 + module['Aisc']*(temp_cell - T0)) )
dfout['effective_irradiance'] = Ee
return dfout
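# Usage sketch (hedged): running the SAPM for one module record under a single
# made-up operating condition. Inputs are passed as Series because the
# .fillna/.clip_lower calls above assume pandas objects; the module name is
# only an example of the underscore-normalized columns produced by
# retrieve_sam.
# >>> sandia_modules = retrieve_sam(name='SandiaMod')
# >>> module = sandia_modules['Canadian_Solar_CS5P_220M___2009_']
# >>> out = sapm(module, poa_direct=pd.Series([800.]),
# ...            poa_diffuse=pd.Series([100.]), temp_cell=pd.Series([40.]),
# ...            airmass_absolute=pd.Series([1.5]), aoi=pd.Series([20.]))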
def sapm_celltemp(irrad, wind, temp, model='open_rack_cell_glassback'):
'''
Estimate cell and module temperatures per the Sandia PV Array
Performance Model (SAPM, SAND2004-3535), from the incident
irradiance, wind speed, ambient temperature, and SAPM module
parameters.
Parameters
----------
irrad : float or Series
Total incident irradiance in W/m^2.
wind : float or Series
Wind speed in m/s at a height of 10 meters.
temp : float or Series
Ambient dry bulb temperature in degrees C.
model : string or list
Model to be used.
If string, can be:
* 'open_rack_cell_glassback' (default)
* 'roof_mount_cell_glassback'
* 'open_rack_cell_polymerback'
* 'insulated_back_polymerback'
* 'open_rack_polymer_thinfilm_steel'
* '22x_concentrator_tracker'
If list, supply the following parameters in the following order:
* a : float
SAPM module parameter for establishing the upper
limit for module temperature at low wind speeds and
high solar irradiance.
* b : float
SAPM module parameter for establishing the rate at
which the module temperature drops as wind speed increases
(see SAPM eqn. 11).
* deltaT : float
SAPM module parameter giving the temperature difference
between the cell and module back surface at the
reference irradiance, E0.
Returns
--------
DataFrame with columns 'temp_cell' and 'temp_module'.
Values in degrees C.
References
----------
[1] King, D. et al, 2004, "Sandia Photovoltaic Array Performance Model",
SAND Report 3535, Sandia National Laboratories, Albuquerque, NM.
See Also
--------
sapm
'''
temp_models = {'open_rack_cell_glassback': [-3.47, -.0594, 3],
'roof_mount_cell_glassback': [-2.98, -.0471, 1],
'open_rack_cell_polymerback': [-3.56, -.0750, 3],
'insulated_back_polymerback': [-2.81, -.0455, 0],
'open_rack_polymer_thinfilm_steel': [-3.58, -.113, 3],
'22x_concentrator_tracker': [-3.23, -.130, 13]
}
if isinstance(model, str):
model = temp_models[model.lower()]
elif isinstance(model, list):
model = model
a = model[0]
b = model[1]
deltaT = model[2]
E0 = 1000. # Reference irradiance
temp_module = pd.Series(irrad*np.exp(a + b*wind) + temp)
temp_cell = temp_module + (irrad / E0)*(deltaT)
return pd.DataFrame({'temp_cell': temp_cell, 'temp_module': temp_module})
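# Usage sketch: cell/module temperature at 1000 W/m^2, 5 m/s wind and 25 C
# ambient, first with the default open-rack glass-back coefficients and then
# with an explicit [a, b, deltaT] list.
# >>> sapm_celltemp(1000., 5., 25.)
# >>> sapm_celltemp(1000., 5., 25., model=[-3.47, -.0594, 3])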
def singlediode(module, photocurrent, saturation_current,
resistance_series, resistance_shunt, nNsVth):
'''
Solve the single-diode model to obtain a photovoltaic IV curve.
Singlediode solves the single diode equation [1]
.. math::
I = IL - I0*[exp((V+I*Rs)/(nNsVth))-1] - (V + I*Rs)/Rsh
for ``I`` and ``V`` when given
``IL, I0, Rs, Rsh,`` and ``nNsVth (nNsVth = n*Ns*Vth)`` which
are described later. Returns a DataFrame which contains
the 5 points on the I-V curve specified in SAND2004-3535 [3].
If all IL, I0, Rs, Rsh, and nNsVth are scalar, a single curve
will be returned, if any are Series (of the same length), multiple IV
curves will be calculated.
The input parameters can be calculated using calcparams_desoto from
meteorological data.
Parameters
----------
module : DataFrame or Series
A DataFrame or Series of module parameters; it must contain 'V_oc_ref',
which is used below to bracket the open-circuit-voltage and
maximum-power-point searches.
photocurrent : float or Series
Light-generated current (photocurrent) in amperes under desired IV
curve conditions. Often abbreviated ``I_L``.
saturation_current : float or Series
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
resistance_series : float or Series
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
resistance_shunt : float or Series
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
nNsVth : float or Series
The product of three components. 1) The usual diode ideal
factor (n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth).
The thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin,
and q is the charge of an electron (coulombs).
Returns
-------
If ``photocurrent`` is a Series, a DataFrame with the following columns.
All columns have the same number of rows as the largest input DataFrame.
If ``photocurrent`` is a scalar, a dict with the following keys.
* i_sc - short circuit current in amperes.
* v_oc - open circuit voltage in volts.
* i_mp - current at maximum power point in amperes.
* v_mp - voltage at maximum power point in volts.
* p_mp - power at maximum power point in watts.
* i_x - current, in amperes, at ``v = 0.5*v_oc``.
* i_xx - current, in amperes, at ``V = 0.5*(v_oc+v_mp)``.
Notes
-----
The solution employed to solve the implicit diode equation utilizes
the Lambert W function to obtain an explicit function of V=f(i) and
I=f(V) as shown in [2].
References
-----------
[1] S.R. Wenham, M.A. Green, M.E. Watt, "Applied Photovoltaics"
ISBN 0 86758 909 4
[2] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
real solar cells using Lambert W-function", Solar Energy Materials
and Solar Cells, 81 (2004) 269-277.
[3] D. King et al, "Sandia Photovoltaic Array Performance Model",
SAND2004-3535, Sandia National Laboratories, Albuquerque, NM
See also
--------
sapm
calcparams_desoto
'''
pvl_logger.debug('pvsystem.singlediode')
# Find short circuit current using Lambert W
i_sc = i_from_v(resistance_shunt, resistance_series, nNsVth, 0.01,
saturation_current, photocurrent)
params = {'r_sh': resistance_shunt,
'r_s': resistance_series,
'nNsVth': nNsVth,
'i_0': saturation_current,
'i_l': photocurrent}
__, v_oc = _golden_sect_DataFrame(params, 0, module['V_oc_ref']*1.6,
_v_oc_optfcn)
p_mp, v_mp = _golden_sect_DataFrame(params, 0, module['V_oc_ref']*1.14,
_pwr_optfcn)
# Invert the Power-Current curve. Find the current where the inverted power
# is minimized. This is i_mp. Start the optimization at v_oc/2
i_mp = i_from_v(resistance_shunt, resistance_series, nNsVth, v_mp,
saturation_current, photocurrent)
# Find Ix and Ixx using Lambert W
i_x = i_from_v(resistance_shunt, resistance_series, nNsVth,
0.5*v_oc, saturation_current, photocurrent)
i_xx = i_from_v(resistance_shunt, resistance_series, nNsVth,
0.5*(v_oc+v_mp), saturation_current, photocurrent)
# @wholmgren: need to move this stuff to a different function
# If the user says they want a curve of with number of points equal to
# NumPoints (must be >=2), then create a voltage array where voltage is
# zero in the first column, and Voc in the last column. Number of columns
# must equal NumPoints. Each row represents the voltage for one IV curve.
# Then create a current array where current is Isc in the first column, and
# zero in the last column, and each row represents the current in one IV
# curve. Thus the nth (V,I) point of curve m would be found as follows:
# (Result.V(m,n),Result.I(m,n)).
# if NumPoints >= 2
# s = ones(1,NumPoints); # shaping DataFrame to shape the column DataFrame parameters into 2-D matrices
# Result.V = (Voc)*(0:1/(NumPoints-1):1);
# Result.I = I_from_V(Rsh*s, Rs*s, nNsVth*s, Result.V, I0*s, IL*s);
# end
dfout = {}
dfout['i_sc'] = i_sc
dfout['i_mp'] = i_mp
dfout['v_oc'] = v_oc
dfout['v_mp'] = v_mp
dfout['p_mp'] = p_mp
dfout['i_x'] = i_x
dfout['i_xx'] = i_xx
try:
dfout = pd.DataFrame(dfout, index=photocurrent.index)
except AttributeError:
pass
return dfout
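# Usage sketch (hedged): chaining calcparams_desoto into singlediode, as the
# docstring above suggests. The values are placeholders, and ``module`` must
# also carry 'V_oc_ref' for the voltage bracketing used above.
# >>> IL, I0, Rs, Rsh, nNsVth = calcparams_desoto(
# ...     poa_global=800.0, temp_cell=45.0, alpha_isc=0.003,
# ...     module_parameters=module, EgRef=1.121, dEgdT=-0.0002677)
# >>> iv_points = singlediode(module, IL, I0, Rs, Rsh, nNsVth)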
# Created April,2014
# Author: Rob Andrews, Calama Consulting
def _golden_sect_DataFrame(params, VL, VH, func):
'''
Vectorized golden section search for finding MPPT
from a dataframe timeseries.
Parameters
----------
params : dict
Dictionary containing scalars or arrays
of inputs to the function to be optimized.
Each row should represent an independent optimization.
VL: float
Lower bound of the optimization
VH: float
Upper bound of the optimization
func: function
Function to be optimized must be in the form f(array-like, x)
Returns
-------
func(df,'V1') : DataFrame
function evaluated at the optimal point
df['V1']: Dataframe
Dataframe of optimal points
Notes
-----
This function will find the MAXIMUM of a function
'''
df = params
df['VH'] = VH
df['VL'] = VL
err = df['VH'] - df['VL']
errflag = True
iterations = 0
while errflag:
phi = (np.sqrt(5)-1)/2*(df['VH']-df['VL'])
df['V1'] = df['VL'] + phi
df['V2'] = df['VH'] - phi
df['f1'] = func(df, 'V1')
df['f2'] = func(df, 'V2')
df['SW_Flag'] = df['f1'] > df['f2']
df['VL'] = df['V2']*df['SW_Flag'] + df['VL']*(~df['SW_Flag'])
df['VH'] = df['V1']*~df['SW_Flag'] + df['VH']*(df['SW_Flag'])
err = df['V1'] - df['V2']
try:
errflag = (abs(err)>.01).all()
except ValueError:
errflag = (abs(err)>.01)
iterations += 1
if iterations > 50:
raise Exception("EXCEPTION:iterations exeeded maximum (50)")
return func(df, 'V1'), df['V1']
def _pwr_optfcn(df, loc):
'''
Function to find power from ``i_from_v``.
'''
I = i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
df[loc], df['i_0'], df['i_l'])
return I*df[loc]
def _v_oc_optfcn(df, loc):
'''
Function to find the open circuit voltage from ``i_from_v``.
'''
I = -abs(i_from_v(df['r_sh'], df['r_s'], df['nNsVth'],
df[loc], df['i_0'], df['i_l']))
return I
def i_from_v(resistance_shunt, resistance_series, nNsVth, voltage,
saturation_current, photocurrent):
'''
Calculates current from voltage per Eq 2 Jain and Kapoor 2004 [1].
Parameters
----------
resistance_series : float or Series
Series resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rs``.
resistance_shunt : float or Series
Shunt resistance in ohms under desired IV curve conditions.
Often abbreviated ``Rsh``.
saturation_current : float or Series
Diode saturation current in amperes under desired IV curve
conditions. Often abbreviated ``I_0``.
nNsVth : float or Series
The product of three components. 1) The usual diode ideal
factor (n), 2) the number of cells in series (Ns), and 3) the cell
thermal voltage under the desired IV curve conditions (Vth).
The thermal voltage of the cell (in volts) may be calculated as
``k*temp_cell/q``, where k is Boltzmann's constant (J/K),
temp_cell is the temperature of the p-n junction in Kelvin,
and q is the charge of an electron (coulombs).
photocurrent : float or Series
Light-generated current (photocurrent) in amperes under desired IV
curve conditions. Often abbreviated ``I_L``.
Returns
-------
current : np.array
References
----------
[1] A. Jain, A. Kapoor, "Exact analytical solutions of the parameters of
real solar cells using Lambert W-function", Solar Energy Materials
and Solar Cells, 81 (2004) 269-277.
'''
try:
from scipy.special import lambertw
except ImportError:
raise ImportError('This function requires scipy')
Rsh = resistance_shunt
Rs = resistance_series
I0 = saturation_current
IL = photocurrent
V = voltage
argW = (Rs*I0*Rsh *
np.exp( Rsh*(Rs*(IL+I0)+V) / (nNsVth*(Rs+Rsh)) ) /
(nNsVth*(Rs + Rsh)) )
lambertwterm = lambertw(argW)
pvl_logger.debug('argW: {}, lambertwterm: {}'.format(argW, lambertwterm))
# Eqn. 4 in Jain and Kapoor, 2004
I = -V/(Rs + Rsh) - (nNsVth/Rs)*lambertwterm + Rsh*(IL + I0)/(Rs + Rsh)
return I.real
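# Usage sketch: the current at V = 0 (essentially the short-circuit point) for
# one set of illustrative diode parameters; this mirrors how singlediode calls
# i_from_v above.
# >>> i_from_v(resistance_shunt=400., resistance_series=0.5, nNsVth=1.6,
# ...          voltage=0., saturation_current=8e-10, photocurrent=5.1)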
def snlinverter(inverter, v_dc, p_dc):
'''
Converts DC power and voltage to AC power using
Sandia's Grid-Connected PV Inverter model.
Determines the AC power output of an inverter given the DC voltage, DC
power, and appropriate Sandia Grid-Connected Photovoltaic Inverter
Model parameters. The output, ac_power, is clipped at the maximum power
output, and gives a negative power during low-input power conditions,
but does NOT account for maximum power point tracking voltage windows
nor maximum current or voltage limits on the inverter.
Parameters
----------
inverter : DataFrame
A DataFrame defining the inverter to be used, giving the
inverter performance parameters according to the Sandia
Grid-Connected Photovoltaic Inverter Model (SAND 2007-5036) [1].
A set of inverter performance parameters are provided with pvlib,
or may be generated from a System Advisor Model (SAM) [2]
library using retrievesam.
Required DataFrame columns are:
====== ============================================================
Column Description
====== ============================================================
Pac0 AC-power output from inverter based on input power
and voltage (W)
Pdc0 DC-power input to inverter, typically assumed to be equal
to the PV array maximum power (W)
Vdc0 DC-voltage level at which the AC-power rating is achieved
at the reference operating condition (V)
Ps0 DC-power required to start the inversion process, or
self-consumption by inverter, strongly influences inverter
efficiency at low power levels (W)
C0 Parameter defining the curvature (parabolic) of the
relationship between ac-power and dc-power at the reference
operating condition, default value of zero gives a
linear relationship (1/W)
C1 Empirical coefficient allowing Pdco to vary linearly
with dc-voltage input, default value is zero (1/V)
C2 Empirical coefficient allowing Pso to vary linearly with
dc-voltage input, default value is zero (1/V)
C3 Empirical coefficient allowing Co to vary linearly with
dc-voltage input, default value is zero (1/V)
Pnt AC-power consumed by inverter at night (night tare) to
maintain circuitry required to sense PV array voltage (W)
====== ============================================================
v_dc : float or Series
DC voltages, in volts, which are provided as input to the inverter.
Vdc must be >= 0.
p_dc : float or Series
A scalar or Series of DC powers, in watts, which are provided
as input to the inverter. Pdc must be >= 0.
Returns
-------
ac_power : float or Series
Modeled AC power output given the input
DC voltage, Vdc, and input DC power, Pdc. When ac_power would be
greater than Pac0, it is set to Pac0 to represent inverter
"clipping". When ac_power would be less than Ps0 (startup power
required), then ac_power is set to -1*abs(Pnt) to represent nightly
power losses. ac_power is not adjusted for maximum power point
tracking (MPPT) voltage windows or maximum current limits of the
inverter.
References
----------
[1] SAND2007-5036, "Performance Model for Grid-Connected Photovoltaic
Inverters by D. King, S. Gonzalez, G. Galbraith, W. Boyson
[2] System Advisor Model web page. https://sam.nrel.gov.
See also
--------
sapm
singlediode
'''
Paco = inverter['Paco']
Pdco = inverter['Pdco']
Vdco = inverter['Vdco']
Pso = inverter['Pso']
C0 = inverter['C0']
C1 = inverter['C1']
C2 = inverter['C2']
C3 = inverter['C3']
Pnt = inverter['Pnt']
A = Pdco * (1 + C1*(v_dc - Vdco))
B = Pso * (1 + C2*(v_dc - Vdco))
C = C0 * (1 + C3*(v_dc - Vdco))
# ensures that function works with scalar or Series input
p_dc = pd.Series(p_dc)
ac_power = ( Paco/(A-B) - C*(A-B) ) * (p_dc-B) + C*((p_dc-B)**2)
ac_power[ac_power > Paco] = Paco
ac_power[ac_power < Pso] = - 1.0 * abs(Pnt)
if len(ac_power) == 1:
ac_power = ac_power.ix[0]
return ac_power
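# Usage sketch (hedged): AC power for one inverter record from the SAM library
# (the inverter name is the one shown in the retrieve_sam example above);
# v_dc and p_dc are placeholder operating points.
# >>> inverters = retrieve_sam(name='SandiaInverter')
# >>> inv = inverters.AE_Solar_Energy__AE6_0__277V__277V__CEC_2012_
# >>> snlinverter(inv, v_dc=360., p_dc=4000.)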
| bsd-3-clause |
Biles430/FPF_PIV | piv_outer.py | 1 | 3559 | import pandas as pd
from pandas import DataFrame
import numpy as np
import PIV
import h5py
import matplotlib.pyplot as plt
import hotwire as hw
################################################
# PURPOSE
# 1. Compute Integral Parameters
# 2. Outer Normalize
# 3. Plot
##################################################
# note: vel and axis are flipped to properly calc delta
def piv_outer(date, num_tests, legend1):
#initalize variables
umean_fov = dict()
vmean_fov = dict()
umean = dict()
vmean = dict()
urms = dict()
vrms = dict()
uvprime = dict()
x = dict()
y = dict()
for j in range(0, num_tests):
#read in variables
name = 'data/PIV_' + date + '_' +str(j) + '.h5'
umean_fov[j] = np.array(pd.read_hdf(name, 'umean'))
vmean_fov[j] = np.array(pd.read_hdf(name, 'vmean'))
umean[j] = np.array(pd.read_hdf(name, 'umean_profile_avg'))
vmean[j] = np.array(pd.read_hdf(name, 'vmean_profile_avg'))
urms[j] = np.array(pd.read_hdf(name, 'urms_profile_avg'))
vrms[j] = np.array(pd.read_hdf(name, 'vrms_profile_avg'))
uvprime[j] = np.array(pd.read_hdf(name, 'uvprime_profile_avg'))
x[j] = np.array(pd.read_hdf(name, 'xaxis'))
y[j] = np.array(pd.read_hdf(name, 'yaxis'))
###2. Outer Normalize #############
###################################
###3. PLOTS ######################
###################################
marker_u = ['-xr', '-or','-sr']
marker_v = ['-xb', '-ob','-sb']
#mean profiles
#U vs y
plt.figure()
for j in range(0, num_tests):
plt.plot(y[j], umean[j], marker_u[j])
plt.ylabel('U (m/sec)', fontsize=14)
plt.xlabel('Wall Normal Position (m)', fontsize=14)
plt.legend(legend1, loc=0)
plt.show()
#V vs y
plt.figure()
for j in range(0, num_tests):
plt.plot(y[j], vmean[j], marker_v[j])
plt.ylabel('V (m/sec)', fontsize=14)
plt.xlabel('Wall Normal Position (m)', fontsize=14)
plt.legend(legend1, loc=0)
plt.show()
#urms vs y
plt.figure()
for j in range(0, num_tests):
plt.plot(y[j], urms[j], marker_u[j])
plt.ylabel('$U_{rms}$ (m/sec)', fontsize=20)
plt.xlabel('Wall Normal Position (m)', fontsize=14)
plt.legend(legend1, loc=0)
plt.show()
#vrms vs y
plt.figure()
for j in range(0, num_tests):
plt.plot(y[j], vrms[j], marker_v[j])
plt.ylabel('$V_{rms}$ (m/sec)', fontsize=20)
plt.xlabel('Wall Normal Position (m)', fontsize=14)
plt.legend(legend1, loc=0)
plt.show()
#uprime vs y
plt.figure()
for j in range(0, num_tests):
plt.plot(y[j], uvprime[j], marker_u[j])
plt.ylabel('$u^,v^,$', fontsize=20)
plt.xlabel('Wall Normal Position (m)', fontsize=14)
plt.legend(legend1, loc=0)
plt.show()
### Mean Vector plot
skip_num = 5
umean_fov2 = umean_fov[0]
vmean_fov2 = vmean_fov[0]
x2 = x[0]
umean_fov2 = umean_fov2[:, 0:-1:skip_num]
vmean_fov2 = vmean_fov2[:, 0:-1:skip_num]
x2 = x2[0:-1:skip_num]
y2 = y[0]
Y = np.tile(y2, (len(x2), 1))
Y = np.transpose(Y)
X = np.tile(x2-.0543, (len(y2), 1))
mean_fov2 = (umean_fov2**2 + vmean_fov2**2)**(1/2)
contour_levels = np.arange(0, 5, .05)
plt.figure()
c = plt.contourf(X, Y, mean_fov2, levels = contour_levels, linewidth=40, alpha=.6)
cbar = plt.colorbar(c)
cbar.ax.set_ylabel('Velocity (m/sec)')
# subsequent plot calls draw on the same axes (plt.hold was removed from matplotlib)
q = plt.quiver(X, Y, umean_fov2, vmean_fov2, angles='xy', scale=50, width=.0025)
p = plt.quiverkey(q, .11, -.025, 4,"4 m/s",coordinates='data',color='r')
plt.axis([0, .1, 0, .2])
plt.ylabel('Wall Normal Position, $y/\delta$', fontsize=18)
plt.xlabel('Streamwise Position, x (m)', fontsize=14)
plt.title('Mean PIV Vector Field', fontsize=14)
plt.show()
print('Done!')
return
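# Usage sketch (hypothetical arguments): plot two previously processed runs,
# assuming data/PIV_<date>_<n>.h5 files already exist for the given date
# string; the date format below is only a guess.
# >>> piv_outer('4_15_16', 2, ['run 0', 'run 1'])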
| mit |
deepesch/scikit-learn | sklearn/tests/test_lda.py | 77 | 6258 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
| bsd-3-clause |
harisbal/pandas | pandas/tests/reshape/merge/test_merge_index_as_string.py | 7 | 5670 | import numpy as np
import pytest
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
@pytest.fixture
def df1():
return DataFrame(dict(
outer=[1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4],
inner=[1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2],
v1=np.linspace(0, 1, 11)))
@pytest.fixture
def df2():
return DataFrame(dict(
outer=[1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3],
inner=[1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3],
v2=np.linspace(10, 11, 12)))
@pytest.fixture(params=[[], ['outer'], ['outer', 'inner']])
def left_df(request, df1):
""" Construct left test DataFrame with specified levels
(any of 'outer', 'inner', and 'v1')"""
levels = request.param
if levels:
df1 = df1.set_index(levels)
return df1
@pytest.fixture(params=[[], ['outer'], ['outer', 'inner']])
def right_df(request, df2):
""" Construct right test DataFrame with specified levels
(any of 'outer', 'inner', and 'v2')"""
levels = request.param
if levels:
df2 = df2.set_index(levels)
return df2
def compute_expected(df_left, df_right,
on=None, left_on=None, right_on=None, how=None):
"""
Compute the expected merge result for the test case.
This method computes the expected result of merging two DataFrames on
a combination of their columns and index levels. It does so by
explicitly dropping/resetting their named index levels, performing a
merge on their columns, and then finally restoring the appropriate
index in the result.
Parameters
----------
df_left : DataFrame
The left DataFrame (may have zero or more named index levels)
df_right : DataFrame
The right DataFrame (may have zero or more named index levels)
on : list of str
The on parameter to the merge operation
left_on : list of str
The left_on parameter to the merge operation
right_on : list of str
The right_on parameter to the merge operation
how : str
The how parameter to the merge operation
Returns
-------
DataFrame
The expected merge result
"""
# Handle on param if specified
if on is not None:
left_on, right_on = on, on
# Compute input named index levels
left_levels = [n for n in df_left.index.names if n is not None]
right_levels = [n for n in df_right.index.names if n is not None]
# Compute output named index levels
output_levels = [i for i in left_on
if i in right_levels and i in left_levels]
# Drop index levels that aren't involved in the merge
drop_left = [n for n in left_levels if n not in left_on]
if drop_left:
df_left = df_left.reset_index(drop_left, drop=True)
drop_right = [n for n in right_levels if n not in right_on]
if drop_right:
df_right = df_right.reset_index(drop_right, drop=True)
# Convert remaining index levels to columns
reset_left = [n for n in left_levels if n in left_on]
if reset_left:
df_left = df_left.reset_index(level=reset_left)
reset_right = [n for n in right_levels if n in right_on]
if reset_right:
df_right = df_right.reset_index(level=reset_right)
# Perform merge
expected = df_left.merge(df_right,
left_on=left_on,
right_on=right_on,
how=how)
# Restore index levels
if output_levels:
expected = expected.set_index(output_levels)
return expected
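# Minimal sketch of what compute_expected does for on=['outer'], how='inner'
# when both frames are indexed by 'outer': reset the level, merge on the
# column, then restore the index (this is what the parametrized tests below
# compare against).
# >>> left = df_left.reset_index(level=['outer'])
# >>> right = df_right.reset_index(level=['outer'])
# >>> expected = left.merge(right, left_on=['outer'], right_on=['outer'],
# ...                       how='inner').set_index(['outer'])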
@pytest.mark.parametrize('on,how',
[(['outer'], 'inner'),
(['inner'], 'left'),
(['outer', 'inner'], 'right'),
(['inner', 'outer'], 'outer')])
def test_merge_indexes_and_columns_on(left_df, right_df, on, how):
# Construct expected result
expected = compute_expected(left_df, right_df, on=on, how=how)
# Perform merge
result = left_df.merge(right_df, on=on, how=how)
assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize('left_on,right_on,how',
[(['outer'], ['outer'], 'inner'),
(['inner'], ['inner'], 'right'),
(['outer', 'inner'], ['outer', 'inner'], 'left'),
(['inner', 'outer'], ['inner', 'outer'], 'outer')])
def test_merge_indexes_and_columns_lefton_righton(
left_df, right_df, left_on, right_on, how):
# Construct expected result
expected = compute_expected(left_df, right_df,
left_on=left_on,
right_on=right_on,
how=how)
# Perform merge
result = left_df.merge(right_df,
left_on=left_on, right_on=right_on, how=how)
assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize('left_index',
['inner', ['inner', 'outer']])
def test_join_indexes_and_columns_on(df1, df2, left_index, join_type):
# Construct left_df
left_df = df1.set_index(left_index)
# Construct right_df
right_df = df2.set_index(['outer', 'inner'])
# Result
expected = (left_df.reset_index()
.join(right_df, on=['outer', 'inner'], how=join_type,
lsuffix='_x', rsuffix='_y')
.set_index(left_index))
# Perform join
result = left_df.join(right_df, on=['outer', 'inner'], how=join_type,
lsuffix='_x', rsuffix='_y')
assert_frame_equal(result, expected, check_like=True)
| bsd-3-clause |
altairpearl/scikit-learn | sklearn/cluster/bicluster.py | 66 | 19850 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
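# Minimal sketch: for a small dense array, rows and columns of the returned
# matrix are rescaled by the inverse square roots of the row/column sums.
# >>> an, row_diag, col_diag = _scale_normalize(np.array([[1., 2.], [3., 4.]]))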
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
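# Usage sketch for SpectralCoclustering on a small nonnegative random matrix
# (illustrative data only); note that in this version ``fit`` does not return
# ``self``, so calls are not chained.
# >>> X = np.abs(np.random.RandomState(0).randn(20, 15))
# >>> model = SpectralCoclustering(n_clusters=3, random_state=0)
# >>> model.fit(X)
# >>> model.row_labels_.shape, model.column_labels_.shape
# ((20,), (15,))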
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
`svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
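# Usage sketch for SpectralBiclustering with separate row/column cluster
# counts (checkerboard structure); the data and counts are illustrative.
# >>> X = np.abs(np.random.RandomState(0).randn(30, 20))
# >>> model = SpectralBiclustering(n_clusters=(2, 3), method='log',
# ...                              random_state=0)
# >>> model.fit(X)
# >>> model.rows_.shape, model.columns_.shape
# ((6, 30), (6, 20))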
| bsd-3-clause |
cbertinato/pandas | pandas/core/reshape/pivot.py | 1 | 21644 | import numpy as np
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_scalar
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.core.common as com
from pandas.core.frame import _shared_docs
from pandas.core.groupby import Grouper
from pandas.core.index import Index, MultiIndex, _get_objs_combined_axis
from pandas.core.reshape.concat import concat
from pandas.core.reshape.util import cartesian_product
from pandas.core.series import Series
# Note: We need to make sure `frame` is imported before `pivot`, otherwise
# _shared_docs['pivot_table'] will not yet exist. TODO: Fix this dependency
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot_table'], indents=1)
def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean',
fill_value=None, margins=False, dropna=True,
margins_name='All', observed=False):
index = _convert_by(index)
columns = _convert_by(columns)
if isinstance(aggfunc, list):
pieces = []
keys = []
for func in aggfunc:
table = pivot_table(data, values=values, index=index,
columns=columns,
fill_value=fill_value, aggfunc=func,
margins=margins, dropna=dropna,
margins_name=margins_name,
observed=observed)
pieces.append(table)
keys.append(getattr(func, '__name__', func))
return concat(pieces, keys=keys, axis=1)
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys, observed=observed)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how='all')
# gh-21133
# we want to down cast if
# the original values are ints
# as we grouped with a NaN value
# and then dropped, coercing to floats
for v in values:
if (v in data and is_integer_dtype(data[v]) and
v in agged and not is_integer_dtype(agged[v])):
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
if table.index.nlevels > 1:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[:len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
from pandas import MultiIndex
if table.index.nlevels > 1:
m = MultiIndex.from_arrays(cartesian_product(table.index.levels),
names=table.index.names)
table = table.reindex(m, axis=0)
if table.columns.nlevels > 1:
m = MultiIndex.from_arrays(cartesian_product(table.columns.levels),
names=table.columns.names)
table = table.reindex(m, axis=1)
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
table = table.fillna(value=fill_value, downcast='infer')
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(table, data, values, rows=index,
cols=columns, aggfunc=aggfunc,
observed=dropna,
margins_name=margins_name, fill_value=fill_value)
# discard the top level
if (values_passed and not values_multi and not table.empty and
(table.columns.nlevels > 1)):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how='all', axis=1)
return table
def _add_margins(table, data, values, rows, cols, aggfunc,
observed=None, margins_name='All', fill_value=None):
if not isinstance(margins_name, str):
raise ValueError('margins_name argument must be a string')
msg = 'Conflicting name "{name}" in margins'.format(name=margins_name)
for level in table.index.names:
if margins_name in table.index.get_level_values(level):
raise ValueError(msg)
grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name)
# could be passed a Series object with no 'columns'
if hasattr(table, 'columns'):
for level in table.columns.names[1:]:
if margins_name in table.columns.get_level_values(level):
raise ValueError(msg)
if len(rows) > 1:
key = (margins_name,) + ('',) * (len(rows) - 1)
else:
key = margins_name
if not values and isinstance(table, ABCSeries):
# If there are no values and the table is a series, then there is only
# one column in the data. Compute grand margin and return it.
return table.append(Series({key: grand_margin[margins_name]}))
if values:
marginal_result_set = _generate_marginal_results(table, data, values,
rows, cols, aggfunc,
observed,
grand_margin,
margins_name)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
else:
marginal_result_set = _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc, observed, margins_name)
if not isinstance(marginal_result_set, tuple):
return marginal_result_set
result, margin_keys, row_margin = marginal_result_set
row_margin = row_margin.reindex(result.columns, fill_value=fill_value)
# populate grand margin
for k in margin_keys:
if isinstance(k, str):
row_margin[k] = grand_margin[k]
else:
row_margin[k] = grand_margin[k[0]]
from pandas import DataFrame
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
try:
for dtype in set(result.dtypes):
cols = result.select_dtypes([dtype]).columns
margin_dummy[cols] = margin_dummy[cols].astype(dtype)
result = result.append(margin_dummy)
except TypeError:
# we cannot reshape, so coerce the axis
result.index = result.index._to_safe_for_reshape()
result = result.append(margin_dummy)
result.index.names = row_names
return result
def _compute_grand_margin(data, values, aggfunc,
margins_name='All'):
if values:
grand_margin = {}
for k, v in data[values].iteritems():
try:
if isinstance(aggfunc, str):
grand_margin[k] = getattr(v, aggfunc)()
elif isinstance(aggfunc, dict):
if isinstance(aggfunc[k], str):
grand_margin[k] = getattr(v, aggfunc[k])()
else:
grand_margin[k] = aggfunc[k](v)
else:
grand_margin[k] = aggfunc(v)
except TypeError:
pass
return grand_margin
else:
return {margins_name: aggfunc(data.index)}
def _generate_marginal_results(table, data, values, rows, cols, aggfunc,
observed,
grand_margin,
margins_name='All'):
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
margin_keys = []
def _all_key(key):
return (key, margins_name) + ('',) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows + values].groupby(
rows, observed=observed).agg(aggfunc)
cat_axis = 1
for key, piece in table.groupby(level=0,
axis=cat_axis,
observed=observed):
all_key = _all_key(key)
# we are going to mutate this, so need to copy!
piece = piece.copy()
try:
piece[all_key] = margin[key]
except TypeError:
# we cannot reshape, so coerce the axis
piece.set_axis(piece._get_axis(
cat_axis)._to_safe_for_reshape(),
axis=cat_axis, inplace=True)
piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
else:
margin = grand_margin
cat_axis = 0
for key, piece in table.groupby(level=0,
axis=cat_axis,
observed=observed):
all_key = _all_key(key)
table_pieces.append(piece)
table_pieces.append(Series(margin[key], index=[all_key]))
margin_keys.append(all_key)
result = concat(table_pieces, axis=cat_axis)
if len(rows) == 0:
return result
else:
result = table
margin_keys = table.columns
if len(cols) > 0:
row_margin = data[cols + values].groupby(
cols, observed=observed).agg(aggfunc)
row_margin = row_margin.stack()
        # slight hack: move the stacked values level to the front of the index
new_order = [len(cols)] + list(range(len(cols)))
row_margin.index = row_margin.index.reorder_levels(new_order)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _generate_marginal_results_without_values(
table, data, rows, cols, aggfunc,
observed, margins_name='All'):
if len(cols) > 0:
# need to "interleave" the margins
margin_keys = []
def _all_key():
if len(cols) == 1:
return margins_name
return (margins_name, ) + ('', ) * (len(cols) - 1)
if len(rows) > 0:
margin = data[rows].groupby(rows,
observed=observed).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
else:
margin = data.groupby(level=0,
axis=0,
observed=observed).apply(aggfunc)
all_key = _all_key()
table[all_key] = margin
result = table
margin_keys.append(all_key)
return result
else:
result = table
margin_keys = table.columns
if len(cols):
row_margin = data[cols].groupby(cols, observed=observed).apply(aggfunc)
else:
row_margin = Series(np.nan, index=result.columns)
return result, margin_keys, row_margin
def _convert_by(by):
if by is None:
by = []
elif (is_scalar(by) or
isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) or
hasattr(by, '__call__')):
by = [by]
else:
by = list(by)
return by
@Substitution('\ndata : DataFrame')
@Appender(_shared_docs['pivot'], indents=1)
def pivot(data, index=None, columns=None, values=None):
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = data.set_index(cols, append=append)
else:
if index is None:
index = data.index
else:
index = data[index]
index = MultiIndex.from_arrays([index, data[columns]])
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
indexed = data._constructor(data[values].values, index=index,
columns=values)
else:
indexed = data._constructor_sliced(data[values].values,
index=index)
return indexed.unstack(columns)
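# Illustrative note added for clarity (the frame below is hypothetical, not
# part of the original source). With
#   df = pd.DataFrame({'k': ['a', 'a', 'b'], 'c': ['x', 'y', 'x'], 'v': [1, 2, 3]})
# pivot(df, index='k', columns='c', values='v') takes the second branch above:
# it builds a MultiIndex from df['k'] and df['c'], wraps df['v'] in a Series
# and unstacks 'c' into columns (the missing ('b', 'y') cell becomes NaN).
# Calling it without `values` instead set_index-es on the key columns and
# unstacks the `columns` level, so every remaining column becomes a value
# column.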
def crosstab(index, columns, values=None, rownames=None, colnames=None,
aggfunc=None, margins=False, margins_name='All', dropna=True,
normalize=False):
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
.. versionadded:: 0.21.0
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
.. versionadded:: 0.18.1
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
rownames = _get_names(index, rownames, prefix='row')
colnames = _get_names(columns, colnames, prefix='col')
common_idx = _get_objs_combined_axis(index + columns, intersect=True,
sort=False)
data = {}
data.update(zip(rownames, index))
data.update(zip(colnames, columns))
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
from pandas import DataFrame
df = DataFrame(data, index=common_idx)
if values is None:
df['__dummy__'] = 0
kwargs = {'aggfunc': len, 'fill_value': 0}
else:
df['__dummy__'] = values
kwargs = {'aggfunc': aggfunc}
table = df.pivot_table('__dummy__', index=rownames, columns=colnames,
margins=margins, margins_name=margins_name,
dropna=dropna, **kwargs)
# Post-process
if normalize is not False:
table = _normalize(table, normalize=normalize, margins=margins,
margins_name=margins_name)
return table
def _normalize(table, normalize, margins, margins_name='All'):
if not isinstance(normalize, (bool, str)):
axis_subs = {0: 'index', 1: 'columns'}
try:
normalize = axis_subs[normalize]
except KeyError:
raise ValueError("Not a valid normalize argument")
if margins is False:
# Actual Normalizations
normalizers = {
'all': lambda x: x / x.sum(axis=1).sum(axis=0),
'columns': lambda x: x / x.sum(),
'index': lambda x: x.div(x.sum(axis=1), axis=0)
}
normalizers[True] = normalizers['all']
try:
f = normalizers[normalize]
except KeyError:
raise ValueError("Not a valid normalize argument")
table = f(table)
table = table.fillna(0)
elif margins is True:
column_margin = table.loc[:, margins_name].drop(margins_name)
index_margin = table.loc[margins_name, :].drop(margins_name)
table = table.drop(margins_name, axis=1).drop(margins_name)
# to keep index and columns names
table_index_names = table.index.names
table_columns_names = table.columns.names
# Normalize core
table = _normalize(table, normalize=normalize, margins=False)
# Fix Margins
if normalize == 'columns':
column_margin = column_margin / column_margin.sum()
table = concat([table, column_margin], axis=1)
table = table.fillna(0)
elif normalize == 'index':
index_margin = index_margin / index_margin.sum()
table = table.append(index_margin)
table = table.fillna(0)
elif normalize == "all" or normalize is True:
column_margin = column_margin / column_margin.sum()
index_margin = index_margin / index_margin.sum()
index_margin.loc[margins_name] = 1
table = concat([table, column_margin], axis=1)
table = table.append(index_margin)
table = table.fillna(0)
else:
raise ValueError("Not a valid normalize argument")
table.index.names = table_index_names
table.columns.names = table_columns_names
else:
raise ValueError("Not a valid margins argument")
return table
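# Illustrative note added for clarity, using a hypothetical 2x2 count table:
#          x  y
#      a   1  3
#      b   2  2
# normalize='index' rescales each row to sum to 1 (row a -> 0.25, 0.75),
# normalize='columns' rescales each column (column x -> 1/3, 2/3) and
# normalize='all' (or True) divides every cell by the grand total of 8.
# With margins=True the core table is normalized first and the margin
# row/column are rescaled to sum to 1.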
def _get_names(arrs, names, prefix='row'):
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, ABCSeries) and arr.name is not None:
names.append(arr.name)
else:
names.append('{prefix}_{i}'.format(prefix=prefix, i=i))
else:
if len(names) != len(arrs):
raise AssertionError('arrays and names must have the same length')
if not isinstance(names, list):
names = list(names)
return names
| bsd-3-clause |
jayflo/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
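    # Worked example added for clarity: with n_features=11,
    # n_features_to_select=3 and step=2, formula1 gives
    # 1 + (11 + 2 - 3 - 1) // 2 = 1 + 4 = 5 and formula2 gives
    # 1 + ceil((11 - 3) / 2.0) = 1 + 4 = 5, which should match
    # max(rfe.ranking_) computed below.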
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
OMS-NetZero/FAIR | setup.py | 1 | 1405 | from setuptools import setup
from setuptools import find_packages
import versioneer
# README #
def readme():
with open('README.rst') as f:
return f.read()
AUTHORS = [
("Chris Smith", "[email protected]"),
("Richard Millar", "[email protected]"),
("Zeb Nicholls", "[email protected]"),
("Myles Allen", "[email protected]"),
]
setup(
name='fair',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Python package to perform calculations with the FaIR simple climate model',
long_description=readme(),
keywords='simple climate model temperature response carbon cycle emissions forcing',
url='https://github.com/OMS-NetZero/FAIR',
author=", ".join([author[0] for author in AUTHORS]),
author_email=", ".join([author[1] for author in AUTHORS]),
license='Apache 2.0',
packages=find_packages(exclude=['docs*']),
package_data={'': ['*.csv']},
python_requires='>=3.6, <4',
include_package_data=True,
install_requires=[
'matplotlib',
'numpy>=1.14.5',
'scipy>=0.19.0',
'pandas',
],
zip_safe=False,
extras_require={
'docs': ['sphinx>=1.4', 'nbsphinx'],
'dev' : ['notebook', 'scmdata>=0.7.1', 'wheel', 'twine'],
'test': ['pytest>=4.0', 'nbval', 'pytest-cov', 'codecov']
}
)
| apache-2.0 |
jpautom/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/ensemble/plot_feature_transformation.py | 67 | 4285 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model (logistic regression) to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator)
rt_lm = LogisticRegression()
rt.fit(X_train, y_train)
rt_lm.fit(rt.transform(X_train_lr), y_train_lr)
y_pred_rt = rt_lm.predict_proba(rt.transform(X_test))[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show() | bsd-3-clause |
BeckResearchLab/USP-inhibition | tests/test_models.py | 9 | 3129 | import unittest
import bacteriopop_utils
import load_data
import numpy as np
import pandas as pd
import requests
print "hello!"
class TestUrlsExist(unittest.TestCase):
def test_raw_data_link(self):
"""
Test for existence of raw_data.csv link in load_data module.
"""
request = requests.get("https://raw.githubusercontent.com/"
"JanetMatsen/bacteriopop/master/raw_data/"
"raw_data.csv")
self.assertEqual(request.status_code, 200)
def test_sample_meta_info_link(self):
"""
Test for existence of sample_meta_info.tsv link in load_data module.
"""
request = requests.get("https://raw.githubusercontent.com/"
"JanetMatsen/bacteriopop/master/raw_data/"
"sample_meta_info.tsv")
self.assertEqual(request.status_code, 200)
class TestDataframe(unittest.TestCase):
def test_df_columns(self):
"""
Test for output dataframe column count in load_data module.
"""
df = load_data.load_data()
cols = df.columns.tolist()
num = len(cols)
num_assert = len(['kingdom', 'phylum', 'class', 'order',
'family', 'genus', 'length', 'oxygen',
'replicate', 'week', 'abundance'])
self.assertEqual(num, num_assert)
def test_df_type(self):
"""
Test for type of the output dataframe in load_data module.
"""
df = load_data.load_data()
self.assertEqual(type(df), pd.DataFrame)
class TestExtractFeatures(unittest.TestCase):
def test_on_animal_df(self):
"""
Simple example with expected numpy vector to compare to.
Use fillna mode.
"""
animal_df = pd.DataFrame({'animal': ['dog', 'cat', 'rat'],
'color': ['white', 'brown', 'brown'],
'gender': ['F', 'F', np.NaN],
'weight': [25, 5, 1],
'garbage': [0, 1, np.NaN],
'abundance': [0.5, 0.4, 0.1]})
extracted = bacteriopop_utils.extract_features(
dataframe=animal_df,
column_list=['animal', 'color', 'weight', 'abundance'],
fillna=True
)
# check that the column names match what is expected
self.assertEqual(extracted.columns.tolist(),
['abundance', 'animal=cat', 'animal=dog',
'animal=rat', 'color=brown', 'color=white',
'weight'])
# check that the values are what was expected.
expected_result = np.array([[0.5, 0., 1., 0., 0., 1., 25.],
[0.4, 1., 0., 0., 1., 0., 5.],
[0.1, 0., 0., 1., 1., 0., 1.]])
self.assertEqual(expected_result.tolist(),
extracted.as_matrix().tolist())
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ahaberlie/MetPy | tests/plots/test_cartopy_utils.py | 3 | 2612 | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the cartopy utilities."""
import cartopy.crs as ccrs
import matplotlib
import matplotlib.pyplot as plt
import pytest
from metpy.plots import USCOUNTIES, USSTATES
# Fixtures to make sure we have the right backend and consistent round
from metpy.testing import patch_round, set_agg_backend # noqa: F401, I202
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(tolerance=0.053, remove_text=True)
def test_us_county_defaults():
"""Test the default US county plotting."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
ax.add_feature(USCOUNTIES)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.092, remove_text=True)
def test_us_county_scales():
"""Test US county plotting with all scales."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270.25, 270.9, 38.15, 38.75], ccrs.Geodetic())
axis.add_feature(USCOUNTIES.with_scale(scale))
return fig
@pytest.mark.mpl_image_compare(tolerance=0.053, remove_text=True)
def test_us_states_defaults():
"""Test the default US States plotting."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent([270, 280, 28, 39], ccrs.Geodetic())
ax.add_feature(USSTATES)
return fig
@pytest.mark.mpl_image_compare(tolerance=0.092, remove_text=True)
def test_us_states_scales():
"""Test the default US States plotting with all scales."""
proj = ccrs.LambertConformal(central_longitude=-85.0, central_latitude=45.0)
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(1, 3, 1, projection=proj)
ax2 = fig.add_subplot(1, 3, 2, projection=proj)
ax3 = fig.add_subplot(1, 3, 3, projection=proj)
for scale, axis in zip(['20m', '5m', '500k'], [ax1, ax2, ax3]):
axis.set_extent([270, 280, 28, 39], ccrs.Geodetic())
axis.add_feature(USSTATES.with_scale(scale))
return fig
| bsd-3-clause |
dpshelio/sunpy | examples/units_and_coordinates/AIA_limb_STEREO.py | 1 | 3832 | # -*- coding: utf-8 -*-
"""
===========================================
Drawing the AIA limb on a STEREO EUVI image
===========================================
In this example we use a STEREO-B and an SDO image to demonstrate how to
overplot the limb as seen by AIA on an EUVI-B image. Then we overplot the AIA
coordinate grid on the STEREO image.
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord
import sunpy.map
import sunpy.coordinates.wcs_utils
from sunpy.net import Fido, attrs as a
##############################################################################
# The first step is to download some data, we are going to get an image from
# early 2011 when the STEREO spacecraft were roughly 90 deg separated from the
# Earth.
stereo = (a.vso.Source('STEREO_B') &
a.Instrument('EUVI') &
a.Time('2011-01-01', '2011-01-01T00:10:00'))
aia = (a.Instrument('AIA') &
a.vso.Sample(24 * u.hour) &
a.Time('2011-01-01', '2011-01-02'))
wave = a.Wavelength(30 * u.nm, 31 * u.nm)
result = Fido.search(wave, aia | stereo)
###############################################################################
# Let's inspect the result
print(result)
##############################################################################
# and download the files
downloaded_files = Fido.fetch(result)
print(downloaded_files)
##############################################################################
# Let's create a dictionary with the two maps, which we crop to full disk.
maps = {m.detector: m.submap(SkyCoord([-1100, 1100], [-1100, 1100],
unit=u.arcsec, frame=m.coordinate_frame))
for m in sunpy.map.Map(downloaded_files)}
##############################################################################
# Next, let's calculate points on the limb in the AIA image for the half that
# can be seen from STEREO's point of view.
r = maps['AIA'].rsun_obs - 1 * u.arcsec # remove one arcsec so it's on disk.
# Adjust the following range if you only want to plot on STEREO_A
th = np.linspace(-180 * u.deg, 0 * u.deg)
x = r * np.sin(th)
y = r * np.cos(th)
coords = SkyCoord(x, y, frame=maps['AIA'].coordinate_frame)
##############################################################################
# Now, let's plot both maps
fig = plt.figure(figsize=(10, 4))
ax1 = fig.add_subplot(1, 2, 1, projection=maps['AIA'])
maps['AIA'].plot(axes=ax1)
maps['AIA'].draw_limb()
ax2 = fig.add_subplot(1, 2, 2, projection=maps['EUVI'])
maps['EUVI'].plot(axes=ax2)
ax2.plot_coord(coords, color='w')
##############################################################################
# Let's also plot the helioprojective coordinate grid as seen by SDO on the
# STEREO image.
fig = plt.figure()
ax = plt.subplot(projection=maps['EUVI'])
maps['EUVI'].plot()
# Move the title so it does not clash with the extra labels.
tx, ty = ax.title.get_position()
ax.title.set_position([tx, ty + 0.08])
# Change the default grid labels.
stereo_x, stereo_y = ax.coords
stereo_x.set_axislabel("Helioprojective Longitude (STEREO B) [arcsec]")
stereo_y.set_axislabel("Helioprojective Latitude (STEREO B) [arcsec]")
# Add a new coordinate overlay in the SDO frame.
overlay = ax.get_coords_overlay(maps['AIA'].coordinate_frame)
overlay.grid()
# Configure the grid:
x, y = overlay
# Set the ticks to be on the top and left axes.
x.set_ticks_position('tr')
y.set_ticks_position('tr')
# Wrap the longitude at 180 deg rather than the default 360.
x.set_coord_type('longitude', 180.)
# Change the defaults to arcseconds
x.set_major_formatter('s.s')
y.set_major_formatter('s.s')
# Add axes labels
x.set_axislabel("Helioprojective Longitude (SDO) [arcsec]")
y.set_axislabel("Helioprojective Latitude (SDO) [arcsec]")
plt.show()
| bsd-2-clause |
DonBeo/scikit-learn | benchmarks/bench_mnist.py | 154 | 6006 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier               train-time   test-time   error-rate
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM':
make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM':
make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100))
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
GoogleCloudPlatform/mlops-on-gcp | workshops/kfp-caip-sklearn/lab-02-kfp-pipeline/pipeline/helper_components.py | 12 | 2846 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper components."""
from typing import NamedTuple
def retrieve_best_run(
project_id: str, job_id: str
) -> NamedTuple('Outputs', [('metric_value', float), ('alpha', float),
('max_iter', int)]):
"""Retrieves the parameters of the best Hypertune run."""
from googleapiclient import discovery
from googleapiclient import errors
ml = discovery.build('ml', 'v1')
job_name = 'projects/{}/jobs/{}'.format(project_id, job_id)
request = ml.projects().jobs().get(name=job_name)
try:
response = request.execute()
except errors.HttpError as err:
print(err)
except:
print('Unexpected error')
print(response)
best_trial = response['trainingOutput']['trials'][0]
metric_value = best_trial['finalMetric']['objectiveValue']
alpha = float(best_trial['hyperparameters']['alpha'])
max_iter = int(best_trial['hyperparameters']['max_iter'])
return (metric_value, alpha, max_iter)
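# Illustrative note added for clarity (the wrapper call below is an
# assumption about how the surrounding pipeline uses these helpers, not part
# of this file): in a Kubeflow Pipelines DSL they are typically wrapped into
# lightweight components, e.g.
#   import kfp.components as comp
#   retrieve_best_run_op = comp.func_to_container_op(retrieve_best_run)
# so the returned (metric_value, alpha, max_iter) tuple can be passed to the
# downstream training step.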
def evaluate_model(
dataset_path: str, model_path: str, metric_name: str
) -> NamedTuple('Outputs', [('metric_name', str), ('metric_value', float),
('mlpipeline_metrics', 'Metrics')]):
"""Evaluates a trained sklearn model."""
#import joblib
import pickle
import json
import pandas as pd
import subprocess
import sys
from sklearn.metrics import accuracy_score, recall_score
df_test = pd.read_csv(dataset_path)
X_test = df_test.drop('Cover_Type', axis=1)
y_test = df_test['Cover_Type']
# Copy the model from GCS
model_filename = 'model.pkl'
gcs_model_filepath = '{}/{}'.format(model_path, model_filename)
print(gcs_model_filepath)
subprocess.check_call(['gsutil', 'cp', gcs_model_filepath, model_filename],
stderr=sys.stdout)
with open(model_filename, 'rb') as model_file:
model = pickle.load(model_file)
y_hat = model.predict(X_test)
if metric_name == 'accuracy':
metric_value = accuracy_score(y_test, y_hat)
elif metric_name == 'recall':
metric_value = recall_score(y_test, y_hat)
else:
metric_name = 'N/A'
metric_value = 0
# Export the metric
metrics = {
'metrics': [{
'name': metric_name,
'numberValue': float(metric_value)
}]
}
return (metric_name, metric_value, json.dumps(metrics))
| apache-2.0 |
saatvikshah1994/SmartMM | KeywordExtraction/utilities.py | 1 | 7986 | from sklearn.cross_validation import KFold
import csv
import numpy as np
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
import os
from nltk.stem import PorterStemmer
class DataClean:
"""Cleans data by inputting list of regex to search and substitute
Need to add stopword elimination support"""
def __init__(self,clean_list,html_clean = False,split_words=False):
self.clean_list = clean_list
self.html_clean = html_clean
self.split_words = split_words
self.stopwords_eng = stopwords.words("english") + [u"film",u"movie"]
def fit(self,X,y=None):
return self
def transform(self,X):
X = X.flatten()
X = map(self.clean_sentence,X)
return np.array(X)
def clean_sentence(self,sentence):
if self.html_clean:
sentence = BeautifulSoup(sentence).get_text() # removing html markup
sentence = sentence.lower() # everything to lowercase
# sentence = ''.join(x for x in sentence if x.isalnum() or x==" ")
for ch_rep in self.clean_list:
sentence = re.sub(ch_rep[0],ch_rep[1],sentence)
sentence = ' '.join(filter(lambda x:x not in self.stopwords_eng,sentence.split()))
sentence = ' '.join(filter(lambda x:len(x) > 1,sentence.split()))
sentence = sentence.strip(" ") # Remove possible extra spaces
if self.split_words:
sentence = sentence.split()
return sentence
def __repr__(self):
return "DataClean"
class CandidateSelection:
def __init__(self,method="noun_phrase_heuristic_chunks"):
assert method in ["noun_phrase_heuristic_chunks","nounadj_tags_heuristic_words"],\
"`method` must be one of `noun_phrase_heuristic_chunks`/`nounadj_tags_heuristic_words`"
self.method = method
def fit(self,X,y=None):
return self
def transform(self,X):
if self.method == "noun_phrase_heuristic_chunks":
keywords = [self.extract_candidate_chunks_noun_phrase_heuristic(text) for text in X]
else:
keywords = [self.extract_candidate_words_nounadj_tags_heuristic(text) for text in X]
return keywords
def fit_transform(self,X,y=None):
self.fit(X,y)
return self.transform(X)
def extract_candidate_chunks_noun_phrase_heuristic(self, text, grammar=r'KT: {(<JJ>* <NN.*>+ <IN>)? <JJ>* <NN.*>+}'):
import itertools, nltk, string
"""Return all those words as candidates which follow a specific pos_tag pattern"""
# exclude candidates that are stop words or entirely punctuation
punct = set(string.punctuation)
stop_words = set(nltk.corpus.stopwords.words('english'))
# tokenize, POS-tag, and chunk using regular expressions
chunker = nltk.chunk.regexp.RegexpParser(grammar)
tagged_sents = nltk.pos_tag_sents(nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(text))
all_chunks = list(itertools.chain.from_iterable(nltk.chunk.tree2conlltags(chunker.parse(tagged_sent))
for tagged_sent in tagged_sents))
# join constituent chunk words into a single chunked phrase
candidates = [' '.join(word for word, pos, chunk in group).lower()
for key, group in itertools.groupby(all_chunks, lambda (word,pos,chunk): chunk != 'O') if key]
return [cand for cand in candidates
if cand not in stop_words and not all(char in punct for char in cand)]
def extract_candidate_words_nounadj_tags_heuristic(self, text, good_tags=set(['JJ','JJR','JJS','NN','NNP','NNS','NNPS'])):
"""Return all those words as candidates which are good_tags - here theyre nouns/adjectives """
import itertools, nltk, string
# exclude candidates that are stop words or entirely punctuation
punct = set(string.punctuation)
stop_words = set(nltk.corpus.stopwords.words('english'))
# tokenize and POS-tag words
tagged_words = itertools.chain.from_iterable(
nltk.pos_tag_sents(nltk.word_tokenize(sent) for sent in nltk.sent_tokenize(text)))
# filter on certain POS tags and lowercase all words
candidates = [word.lower() for word, tag in tagged_words
if tag in good_tags and word.lower() not in stop_words
and not all(char in punct for char in word)]
return candidates
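# Illustrative usage sketch added for clarity; the sample sentence is
# hypothetical and running this requires the NLTK tokenizer and POS-tagger
# models to be available locally.
def _candidate_selection_usage_example():
    selector = CandidateSelection(method="nounadj_tags_heuristic_words")
    docs = ["Deep learning models dominate modern keyword extraction."]
    # Returns one list per document containing the lowercased
    # nouns/adjectives kept as candidate keywords.
    return selector.fit_transform(docs)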
def load_data(tag="semeval"):
if tag == "semeval":
data_path = "../dataset/semeval2010"
X = []
y = []
ids = []
for f in os.listdir(data_path):
f = os.path.join(data_path,f)
if f.endswith("txt"):
fname = f.replace(".txt","")
ids.append(fname)
key_file = "{}.key".format(fname)
with open(f) as articlefile:
article = articlefile.read()
X.append(article)
with open(key_file) as keywords_file:
keywords = keywords_file.readlines()
keywords_cleaned = [keyword.strip() for keyword in keywords]
y.append(keywords_cleaned)
elif tag == "imdbpy_plotkeywords":
data_path = "../dataset/imdbpy_plotkeywords.csv"
X = []
y = []
ids = []
with open(data_path) as f:
csv_f = csv.reader(f)
for row in csv_f:
num_plot_summaries = int(row[2])
plots = []
for i in xrange(num_plot_summaries):
plots.append(row[i+3])
plots = " ".join(plots)
keywords_idx = num_plot_summaries + 3
keywords = []
for i in xrange(keywords_idx,len(row)):
keyword = row[i]
keyword_alt = keyword.replace("-"," ")
if keyword_alt in plots or keyword in plots:
keywords.append(keyword_alt)
if len(keywords) > 4:
ids.append(row[0])
X.append(plots)
y.append(keywords)
else:
        raise ValueError("`tag` must be one of `semeval`,`imdbpy_plotkeywords`")
return ids,np.array(X),np.array(y)
def cross_validate(data,pipeline,metric_apply,n_folds = 4,stem_y=True):
(X,y) = data
if stem_y:
stemmer = PorterStemmer()
y_stem = []
for keywords in y:
keywords_stemmed = []
for keyword in keywords:
try:
stemmed_keyword = stemmer.stem(keyword.decode('utf-8'))
keywords_stemmed.append(stemmed_keyword)
except Exception as e:
print "Error stemming keyword %s, Skipping." % keyword
y_stem.append(keywords_stemmed)
y = np.array(y_stem)
skf = KFold(len(y),n_folds=n_folds)
precision_score = []
recall_score = []
f1_score = []
counter = 0
for train_idx,val_idx in skf:
counter += 1
print "Running fold %d" % counter
print "fitting"
pipeline.fit(X[train_idx],y[train_idx])
print "predicting"
ypred = pipeline.predict(X[val_idx])
p,r,f = metric_apply(y[val_idx],ypred)
precision_score.append(p)
recall_score.append(r)
f1_score.append(f)
print metric_apply.__name__
print "{} : {} +/- {}".format("precision_score",
np.mean(precision_score),
np.std(precision_score))
print "{} : {} +/- {}".format("recall_score",
np.mean(recall_score),
np.std(recall_score))
print "{} : {} +/- {}".format("f1_score",
np.mean(f1_score),
np.std(f1_score))
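# Illustrative usage sketch added for clarity: cross_validate expects a
# (X, y) tuple, a pipeline object exposing fit/predict where predict returns
# one list of keyword strings per document, and a metric callable returning
# (precision, recall, f1), e.g.
#   ids, X, y = load_data(tag="semeval")
#   cross_validate((X, y), my_keyword_pipeline, my_prf_metric, n_folds=4)
# where my_keyword_pipeline and my_prf_metric are hypothetical stand-ins for
# the project's real components.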
| mit |
ravindrapanda/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 15 | 10904 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of `Estimator` API.
This module is deprecated. Please use
@{tf.contrib.factorization.KMeansClustering} instead of
@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.deprecation import deprecated
_USE_TF_CONTRIB_FACTORIZATION = (
'Please use tf.contrib.factorization.KMeansClustering instead of'
' tf.contrib.learn.KMeansClustering. It has a similar interface, but uses'
' the tf.estimator.Estimator API instead of tf.contrib.learn.Estimator.')
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes _LossRelativeChangeHook.
Args:
tolerance: A relative tolerance of change between iterations.
"""
self._tolerance = tolerance
self._prev_loss = None
def begin(self):
self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
KMeansClustering.LOSS_OP_NAME + ':0')
assert self._loss_tensor is not None
def before_run(self, run_context):
del run_context
return SessionRunArgs(
fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
def after_run(self, run_context, run_values):
loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
assert loss is not None
if self._prev_loss is not None:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
"""Model function for KMeansClustering estimator."""
assert labels is None, labels
(all_scores, model_predictions, losses,
is_initialized, init_op, training_op) = clustering_ops.KMeans(
_parse_tensor_or_dict(features),
params.get('num_clusters'),
initial_clusters=params.get('training_initial_clusters'),
distance_metric=params.get('distance_metric'),
use_mini_batch=params.get('use_mini_batch'),
mini_batch_steps_per_iteration=params.get(
'mini_batch_steps_per_iteration'),
random_seed=params.get('random_seed'),
kmeans_plus_plus_num_retries=params.get(
'kmeans_plus_plus_num_retries')).training_graph()
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
summary.scalar('loss/raw', loss)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0],
}
eval_metric_ops = {KMeansClustering.SCORES: loss}
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
relative_tolerance = params.get('relative_tolerance')
if relative_tolerance is not None:
training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
return ModelFnOps(
mode=mode,
predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss,
train_op=training_op,
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering."""
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
LOSS_OP_NAME = 'kmeans_loss'
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy. See clustering_ops.py
for more details.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
config: See Estimator
"""
params = {}
params['num_clusters'] = num_clusters
params['training_initial_clusters'] = initial_clusters
params['distance_metric'] = distance_metric
params['random_seed'] = random_seed
params['use_mini_batch'] = use_mini_batch
params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
params['relative_tolerance'] = relative_tolerance
super(KMeansClustering, self).__init__(
model_fn=_kmeans_clustering_model_fn,
params=params,
model_dir=model_dir,
config=config)
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def predict_cluster_idx(self, input_fn=None):
"""Yields predicted cluster indices."""
key = KMeansClustering.CLUSTER_IDX
results = super(KMeansClustering, self).predict(
input_fn=input_fn, outputs=[key])
for result in results:
yield result[key]
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def score(self, input_fn=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(
input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def transform(self, input_fn=None, as_iterable=False):
"""Transforms each element to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
input_fn: see predict.
as_iterable: see predict
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
key = KMeansClustering.ALL_SCORES
results = super(KMeansClustering, self).predict(
input_fn=input_fn,
outputs=[key],
as_iterable=as_iterable)
if not as_iterable:
return results[key]
else:
return results
@deprecated(None, _USE_TF_CONTRIB_FACTORIZATION)
def clusters(self):
"""Returns cluster centers."""
return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
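# ---------------------------------------------------------------------------
# Added illustration (not part of the original module): a minimal usage sketch
# of the deprecated estimator above. The data, the input_fn and the step count
# below are assumptions made purely for the example.
#
#   import numpy as np
#   import tensorflow as tf
#
#   points = np.random.rand(1000, 2).astype(np.float32)
#   input_fn = lambda: (tf.constant(points), None)
#
#   kmeans = KMeansClustering(num_clusters=3, relative_tolerance=0.001)
#   kmeans.fit(input_fn=input_fn, steps=10)
#   centers = kmeans.clusters()
#   assignments = list(kmeans.predict_cluster_idx(input_fn=input_fn))
# ---------------------------------------------------------------------------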
| apache-2.0 |
samuelmanzer/interpolation | chebyshev_nodes.py | 1 | 3491 | #!/usr/bin/env python
###############################################################################
# Interpolation
# Copyright (C) Samuel F. Manzer. All rights reserved.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
#
# FILE: lagrange_interpolation.py
# AUTHOR: Samuel F. Manzer
# URL: http://www.samuelmanzer.com/
###############################################################################
from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
import itertools
import tempfile
import pdb
from lagrange_poly import *
parser = ArgumentParser("Produces plots of Lagrange interpolation of cos(x) for various numbers of Chebyshev and equally spaced points")
args = parser.parse_args()
start = 0
end = (5*math.pi)/2
n_eval_pts = 1000
eval_step_size = float(end-start)/n_eval_pts
n_points_range = range(2,6,1)
x = np.linspace(start,end,n_eval_pts)
y_exact = np.cos(x)
f1,(ax1,ax2) = plt.subplots(1,2,sharey=True)
ax1.set_ylim(ymin=-1.1,ymax=1.5)
ax1.set_title("Equally-Spaced")
ax2.set_title("Chebyshev")
f2,ax3 = plt.subplots()
f3,ax4 = plt.subplots()
# Equally spaced points
evenly_spaced_sets = [np.linspace(start,end,n_points) for n_points in n_points_range]
evenly_spaced_polys = [get_lagrange_poly(interp_points,math.cos) for interp_points in evenly_spaced_sets]
lines,mae_list,rmsd_list,maxe_list = plot_lagrange_polys(x,n_points_range,evenly_spaced_polys,y_exact,ax1)
texts_1 = plot_stats(mae_list,rmsd_list,maxe_list,n_points_range,ax3)
f1.legend(lines,map(lambda x: str(x)+" Points",n_points_range)+["cos(x)"],loc="upper right")
# Chebyshev points - we must transform them to our interval
cp_sets = [ [ math.cos((float(2*k - 1)/(2*n))*math.pi) for k in range(1,n+1)] for n in n_points_range ]
tcp_sets = [ [ 0.5*((end - start)*pt + start + end) for pt in point_set] for point_set in cp_sets]
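# Added note: math.cos((2k-1)/(2n) * pi) yields nodes in [-1, 1]; the affine map
# 0.5*((end - start)*pt + start + end) above rescales each node into [start, end],
# so the interpolation points cluster near the ends of the interval.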
chebyshev_point_polys = [get_lagrange_poly(interp_points,math.cos) for interp_points in tcp_sets]
lines,mae_list,rmsd_list,maxe_list = plot_lagrange_polys(x,n_points_range,chebyshev_point_polys,y_exact,ax2)
texts_2 = plot_stats(mae_list,rmsd_list,maxe_list,n_points_range,ax4)
ax3.set_title("Lagrange Interpolation with Equally-Spaced Points")
ax4.set_title("Lagrange Interpolation with Chebyshev Points")
# Awful haxx for text labels above bars to not get cut off by top of figure
tmp_file = tempfile.NamedTemporaryFile()
f2.savefig(tmp_file.name)
f3.savefig(tmp_file.name)
renderer_2 = f2.axes[0].get_renderer_cache()
renderer_3 = f3.axes[0].get_renderer_cache()
for (ax,renderer,texts) in [(ax3,renderer_2,texts_1),(ax4,renderer_3,texts_2)]:
window_bbox_list = [t.get_window_extent(renderer) for t in texts]
data_bbox_list = [b.transformed(ax.transData.inverted()) for b in window_bbox_list]
data_coords_list = [b.extents for b in data_bbox_list]
heights = [ coords[-1] for coords in data_coords_list]
ax.set_ylim(ymax=max(heights)*1.05)
plt.show()
| lgpl-3.0 |
garbersc/keras-galaxies | extract_pysex_params_gen2.py | 8 | 3889 | import load_data
import pysex
import numpy as np
import multiprocessing as mp
import cPickle as pickle
"""
Extract a bunch of extra info to get a better idea of the size of objects
"""
SUBSETS = ['train', 'test']
TARGET_PATTERN = "data/pysex_params_gen2_%s.npy.gz"
SIGMA2 = 5000 # 5000 # std of the centrality weighting (Gaussian)
DETECT_THRESH = 2.0 # 10.0 # detection threshold for sextractor
NUM_PROCESSES = 8
def estimate_params(img):
img_green = img[..., 1] # supposedly using the green channel is a good idea. alternatively we could use luma.
# this seems to work well enough.
out = pysex.run(img_green, params=[
'X_IMAGE', 'Y_IMAGE', # barycenter
# 'XMIN_IMAGE', 'XMAX_IMAGE', 'YMIN_IMAGE', 'YMAX_IMAGE', # enclosing rectangle
# 'XPEAK_IMAGE', 'YPEAK_IMAGE', # location of maximal intensity
'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', # ellipse parameters
'PETRO_RADIUS',
# 'KRON_RADIUS', 'PETRO_RADIUS', 'FLUX_RADIUS', 'FWHM_IMAGE', # various radii
], conf_args={ 'DETECT_THRESH': DETECT_THRESH })
# x and y are flipped for some reason.
# theta should be 90 - theta.
# we convert these here so we can plot stuff with matplotlib easily.
try:
ys = out['X_IMAGE'].tonumpy()
xs = out['Y_IMAGE'].tonumpy()
as_ = out['A_IMAGE'].tonumpy()
bs = out['B_IMAGE'].tonumpy()
thetas = 90 - out['THETA_IMAGE'].tonumpy()
# kron_radii = out['KRON_RADIUS'].tonumpy()
petro_radii = out['PETRO_RADIUS'].tonumpy()
# flux_radii = out['FLUX_RADIUS'].tonumpy()
# fwhms = out['FWHM_IMAGE'].tonumpy()
# detect the most salient galaxy
# take in account size and centrality
surface_areas = np.pi * (as_ * bs)
centralities = np.exp(-((xs - 211.5)**2 + (ys - 211.5)**2)/SIGMA2) # 211.5, 211.5 is the center of the image
# salience is proportional to surface area, with a gaussian prior on the distance to the center.
saliences = surface_areas * centralities
most_salient_idx = np.argmax(saliences)
x = xs[most_salient_idx]
y = ys[most_salient_idx]
a = as_[most_salient_idx]
b = bs[most_salient_idx]
theta = thetas[most_salient_idx]
# kron_radius = kron_radii[most_salient_idx]
petro_radius = petro_radii[most_salient_idx]
# flux_radius = flux_radii[most_salient_idx]
# fwhm = fwhms[most_salient_idx]
except TypeError: # sometimes these are empty (no objects found), use defaults in that case
x = 211.5
y = 211.5
a = np.nan # dunno what this has to be, deal with it later
b = np.nan # same
theta = np.nan # same
# kron_radius = np.nan
petro_radius = np.nan
# flux_radius = np.nan
# fwhm = np.nan
# return (x, y, a, b, theta, flux_radius, kron_radius, petro_radius, fwhm)
return (x, y, a, b, theta, petro_radius)
for subset in SUBSETS:
print "SUBSET: %s" % subset
print
if subset == 'train':
num_images = load_data.num_train
ids = load_data.train_ids
elif subset == 'test':
num_images = load_data.num_test
ids = load_data.test_ids
def process(k):
print "image %d/%d (%s)" % (k + 1, num_images, subset)
img_id = ids[k]
img = load_data.load_image(img_id, from_ram=True, subset=subset)
return estimate_params(img)
pool = mp.Pool(NUM_PROCESSES)
estimated_params = pool.map(process, xrange(num_images), chunksize=100)
pool.close()
pool.join()
# estimated_params = map(process, xrange(num_images)) # no mp for debugging
params_array = np.array(estimated_params)
target_path = TARGET_PATTERN % subset
print "Saving to %s..." % target_path
load_data.save_gz(target_path, params_array)
| bsd-3-clause |
jonathanstrong/tmetrics | tmetrics/classification.py | 1 | 14853 | import theano, theano.tensor as T
import numpy as np
import pandas as pd
import lasagne
"""
note: we are following the sklearn api for metrics/loss functions,
where the first arg for a function is y true, and second value is
y predicted. this is the opposite of the theano functions, so just
keep in mind.
"""
#copy existing code and place in tmetrics namespace
multiclass_hinge_loss = lambda yt, yp: lasagne.objectives.multiclass_hinge_loss(yp, yt)
squared_error = lambda yt, yp: lasagne.objectives.squared_error(yp, yt)
binary_accuracy = lambda yt, yp: lasagne.objectives.binary_accuracy(yp, yt)
categorical_accuracy = lambda yt, yp: lasagne.objectives.categorical_accuracy(yp, yt)
def binary_crossentropy(y_true, y_predicted):
"""
wrapper of theano.tensor.nnet.binary_crossentropy
args reversed to match tmetrics api
"""
return T.nnet.binary_crossentropy(y_predicted, y_true)
def categorical_crossentropy(y_true, y_predicted):
"""
wrapper of theano.tensor.nnet.categorical_crossentropy
args reversed to match tmetrics api
"""
    return T.nnet.categorical_crossentropy(y_predicted, y_true)
def binary_hinge_loss(y_true, y_predicted, binary=True, delta=1):
"""
wrapper of lasagne.objectives.binary_hinge_loss
args reversed to match tmetrics api
"""
return lasagne.objectives.binary_hinge_loss(y_predicted, y_true, binary, delta)
def brier_score_loss(y_true, y_predicted, sample_weight=None):
"""
port of sklearn.metrics.brier_score_loss
works for 2D binary data as well, e.g.
y_true: [[0, 1, 0],
[1, 0, 0]]
y_predicted: [[.1, .9, .3],
[.4, .7, .2]]
y_true: tensor, y true (binary)
y_predicted: tensor, y predicted (float between 0 and 1)
sample_weight: tensor or None (standard mean)
assumptions:
-binary ground truth values ({0, 1}); no pos_label
training wheels like sklearn or figuring out how to
run this on text labels.
-probabilities are floats between 0-1
-sample_weight broadcasts to ((y_true - y_predicted) ** 2)
"""
scores = ((y_true - y_predicted) ** 2)
if sample_weight is not None:
scores = scores * sample_weight
return scores.mean()
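# Example usage (added sketch, not part of the original API; the variable names
# and dtypes are assumptions): the loss is symbolic, so compile it before use.
#     yt, yp = T.fmatrix('yt'), T.fmatrix('yp')
#     brier = theano.function([yt, yp], brier_score_loss(yt, yp))
#     brier(np.array([[0, 1, 0], [1, 0, 0]], dtype='float32'),
#           np.array([[.1, .9, .3], [.4, .7, .2]], dtype='float32'))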
def hamming_loss(y_true, y_predicted):
"""
note - works on n-dim arrays, means across the final axis
note - we round predicted because float probabilities would not work
"""
return T.neq(y_true, T.round(y_predicted)).astype(theano.config.floatX).mean(axis=-1)
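# Worked example (added note): with y_true = [[0, 1, 0], [1, 0, 0]] and
# y_predicted = [[.1, .9, .3], [.4, .7, .2]], rounding the predictions gives
# [[0, 1, 0], [0, 1, 0]], so the per-row losses are [0.0, 2/3].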
def jaccard_similarity(y_true, y_predicted):
"""
y_true: tensor ({1, 0})
y_predicted: tensor ({1, 0})
note - we round predicted because float probabilities would not work
"""
y_predicted = T.round(y_predicted).astype(theano.config.floatX)
either_nonzero = T.or_(T.neq(y_true, 0), T.neq(y_predicted, 0))
return T.and_(T.neq(y_true, y_predicted), either_nonzero).sum(axis=-1, dtype=theano.config.floatX) / either_nonzero.sum(axis=-1, dtype=theano.config.floatX)
def _nbool_correspond_all(u, v):
"""
port of scipy.spatial.distance._nbool_correspond_all
with dtype assumed to be integer/float (no bool in theano)
sums are on last axis
"""
not_u = 1.0 - u
not_v = 1.0 - v
nff = (not_u * not_v).sum(axis=-1, dtype=theano.config.floatX)
nft = (not_u * v).sum(axis=-1, dtype=theano.config.floatX)
ntf = (u * not_v).sum(axis=-1, dtype=theano.config.floatX)
ntt = (u * v).sum(axis=-1, dtype=theano.config.floatX)
return (nff, nft, ntf, ntt)
def kulsinski_similarity(y_true, y_predicted):
y_predicted = T.round(y_predicted)
nff, nft, ntf, ntt = _nbool_correspond_all(y_true, y_predicted)
n = y_true.shape[0].astype('float32')
return (ntf + nft - ntt + n) / (ntf + nft + n)
def trapz(y, x=None, dx=1.0, axis=-1):
"""
reference implementation: numpy.trapz
---------
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
will be taken from `y` array, by default x-axis distances between
points will be 1.0, alternatively they can be provided with `x` array
or with `dx` scalar. Return value will be equal to combined area under
the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
if x is None:
d = dx
else:
if x.ndim == 1:
d = T.extra_ops.diff(x)
# reshape to correct shape
shape = T.ones(y.ndim, dtype='int8')
shape = T.set_subtensor(shape[axis], d.shape[0])
d = d.reshape(shape)
else:
d = T.extra_ops.diff(x, axis=axis)
nd = y.ndim
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
return (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
def auc(x, y):
return abs(trapz(y, x))
#def roc_curve(y_true, y_predicted):
# fps, tps, thresholds = _binary_clf_curve(y_true, y_predicted)
# fpr = fps.astype('float32') / fps[-1]
# tpr = tps.astype('float32') / tps[-1]
# return fpr, tpr, thresholds
#
#def roc_auc_score(y_true, y_predicted):
# fpr, tpr, thresholds = roc_curve(y_true, y_predicted)
# return auc(fpr, tpr)
def _last_axis_binary_clf_curve(y_true, y_predicted):
"""
returns y_predicted.shape[-2] binary clf curves calculated axis[-1]-wise
this is a numpy implementation
"""
assert y_true.shape == y_predicted.shape
axis = -1
sort_idx = list(np.ogrid[[slice(x) for x in y_predicted.shape]])
sort_idx[axis] = y_predicted.argsort(axis=axis).astype('int8')
reverse = [slice(None)] * y_predicted.ndim
reverse[axis] = slice(None, None, -1)
sorted_y_predicted = y_predicted[sort_idx][reverse]
sorted_y_true = y_true[sort_idx][reverse]
tps = sorted_y_true.cumsum(axis=axis)
count = (np.ones(y_predicted.shape) * np.arange(y_predicted.shape[-1]))
fps = 1 + count - tps
threshold_values = sorted_y_predicted
return fps, tps, threshold_values
def last_axis_roc_curve(y_true, y_predicted):
"numpy implementation"
fps, tps, thresholds = _last_axis_binary_clf_curve(y_true, y_predicted)
i = [slice(None)] * fps.ndim
i[-1] = -1
fpr = fps.astype('float32') / np.expand_dims(fps[i], axis=-1)
tpr = tps.astype('float32') / np.expand_dims(tps[i], axis=-1)
#tpr = tps.astype('float32') / tps[i][:, np.newaxis]
return fpr, tpr, thresholds
def last_axis_roc_auc_scores(y_true, y_predicted):
fpr, tpr, _ = last_axis_roc_curve(y_true, y_predicted)
return np.trapz(tpr, fpr)
def _vector_clf_curve(y_true, y_predicted):
"""
sklearn.metrics._binary_clf_curve port
y_true: tensor (vector): y true
y_predicted: tensor (vector): y predicted
returns: fps, tps, threshold_values
fps: tensor (vector): false positivies
tps: tensor (vector): true positives
threshold_values: tensor (vector): value of y predicted at each threshold
along the curve
restrictions:
-not numpy compatible
-only works with two vectors (not matrix or tensor)
"""
assert y_true.ndim == y_predicted.ndim == 1
desc_score_indices = y_predicted.argsort()[::-1].astype('int8')
sorted_y_predicted = y_predicted[desc_score_indices]
sorted_y_true = y_true[desc_score_indices]
distinct_value_indices = (1-T.isclose(T.extra_ops.diff(sorted_y_predicted), 0)).nonzero()[0]
curve_cap = T.extra_ops.repeat(sorted_y_predicted.size - 1, 1)
threshold_indices = T.concatenate([distinct_value_indices, curve_cap]).astype('int8')
tps = T.extra_ops.cumsum(sorted_y_true[threshold_indices])
fps = 1 + threshold_indices - tps
threshold_values = sorted_y_predicted[threshold_indices]
return fps, tps, threshold_values
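# Added sketch of how this curve feeds the ROC helpers below (variable names
# and dtypes are assumptions):
#     yt, yp = T.fvector('yt'), T.fvector('yp')
#     fps, tps, thresh = _vector_clf_curve(yt, yp)
#     clf_curve = theano.function([yt, yp], [fps, tps, thresh])
#     fps_v, tps_v, thresh_v = clf_curve(
#         np.array([0, 0, 1, 1], dtype='float32'),
#         np.array([.1, .4, .35, .8], dtype='float32'))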
def _matrix_clf_curve(y_true, y_predicted):
assert y_true.ndim == y_predicted.ndim == 2
row_i = T.arange(y_true.shape[0], dtype='int8').dimshuffle(0, 'x')
col_i = y_predicted.argsort().astype('int8')
reverse = [slice(None), slice(None, None, -1)]
y_true = y_true[row_i, col_i][reverse]
y_predicted = y_predicted[row_i, col_i][reverse]
tps = y_true.cumsum(axis=-1)
counts = T.ones_like(y_true) * T.arange(y_predicted.shape[-1], dtype='int8')
fps = 1 + counts - tps
return fps, tps, y_predicted
def _tensor3_clf_curve(y_true, y_predicted):
assert y_true.ndim == y_predicted.ndim == 3
x_i = T.arange(y_true.shape[0], dtype='int8').dimshuffle(0, 'x', 'x')
y_i = T.arange(y_true.shape[1], dtype='int8').dimshuffle('x', 0, 'x')
z_i = y_predicted.argsort().astype('int8')
reverse = [slice(None), slice(None), slice(None, None, -1)]
y_true = y_true[x_i, y_i, z_i][reverse]
y_predicted = y_predicted[x_i, y_i, z_i][reverse]
tps = y_true.cumsum(axis=-1)
counts = T.ones_like(y_true) * T.arange(y_predicted.shape[-1], dtype='int8')
fps = 1 + counts - tps
return fps, tps, y_predicted
def _tensor4_clf_curve(y_true, y_predicted):
assert y_true.ndim == y_predicted.ndim == 4
a_i = T.arange(y_true.shape[0], dtype='int8').dimshuffle(0, 'x', 'x', 'x')
b_i = T.arange(y_true.shape[1], dtype='int8').dimshuffle('x', 0, 'x', 'x')
c_i = T.arange(y_true.shape[2], dtype='int8').dimshuffle('x', 'x', 0, 'x')
d_i = y_predicted.argsort().astype('int8')
reverse = [slice(None), slice(None), slice(None), slice(None, None, -1)]
y_true = y_true[a_i, b_i, c_i, d_i][reverse]
y_predicted = y_predicted[a_i, b_i, c_i, d_i][reverse]
tps = y_true.cumsum(axis=-1)
counts = T.ones_like(y_true) * T.arange(y_predicted.shape[-1], dtype='int8')
fps = 1 + counts - tps
return fps, tps, y_predicted
def _binary_clf_curves(y_true, y_predicted):
"""
returns curves calculated axis[-1]-wise
    note - despite trying several approaches, could not seem to get an
    n-dimensional version of clf_curve to work, so abandoning. 2,3,4 is fine.
"""
if not (y_true.ndim == y_predicted.ndim):
raise ValueError('Dimension mismatch, ({}, {})'.format(y_true.ndim, y_predicted.ndim))
if not isinstance(y_true, T.TensorVariable) or not isinstance(y_predicted, T.TensorVariable):
raise TypeError('This only works for symbolic variables.')
if y_true.ndim == 1:
clf_curve_fn = _vector_clf_curve
elif y_true.ndim == 2:
clf_curve_fn = _matrix_clf_curve
elif y_true.ndim == 3:
clf_curve_fn = _tensor3_clf_curve
elif y_true.ndim == 4:
clf_curve_fn = _tensor4_clf_curve
else:
raise NotImplementedError('Not implemented for ndim {}'.format(y_true.ndim))
fps, tps, thresholds = clf_curve_fn(y_true, y_predicted)
return fps, tps, thresholds
def _last_col_idx(ndim):
last_col = [slice(None) for x in xrange(ndim)]
last_col[-1] = -1
return last_col
def _reverse_idx(ndim):
reverse = [slice(None) for _ in range(ndim-1)]
reverse.append(slice(None, None, -1))
return reverse
def roc_curves(y_true, y_predicted):
"returns roc curves calculated axis -1-wise"
fps, tps, thresholds = _binary_clf_curves(y_true, y_predicted)
last_col = _last_col_idx(y_true.ndim)
fpr = fps.astype('float32') / T.shape_padright(fps[last_col], 1)
tpr = tps.astype('float32') / T.shape_padright(tps[last_col], 1)
return fpr, tpr, thresholds
def roc_auc_scores(y_true, y_predicted):
"roc auc scores calculated axis -1-wise"
fpr, tpr, thresholds = roc_curves(y_true, y_predicted)
return auc(fpr, tpr)
def roc_auc_loss(y_true, y_predicted):
return 1-roc_auc_scores(y_true, y_predicted)
def precision_recall_curves(y_true, y_predicted):
"precision recall curves calculated axis -1-wise"
fps, tps, thresholds = _binary_clf_curves(y_true, y_predicted)
last_col = _last_col_idx(y_true.ndim)
last_col[-1] = np.asarray([-1], dtype='int8')
precision = tps.astype('float32') / (tps + fps)
if y_true.ndim == 1:
recall = tps.astype('float32') / tps[-1]
else:
recall = tps.astype('float32') / tps[last_col]
reverse = _reverse_idx(fps.ndim)
precision = precision[reverse]
recall = recall[reverse]
thresholds = thresholds[reverse]
if y_true.ndim == 1:
ones, zeros = np.asarray([1], dtype='float32'), np.asarray([0], dtype='float32')
else:
ones = T.ones_like(precision)[last_col]
zeros = T.zeros_like(recall)[last_col]
precision = T.concatenate([precision, ones], axis=-1)
recall = T.concatenate([recall, zeros], axis=-1)
return precision, recall, thresholds
def average_precision_scores(y_true, y_predicted):
precision, recall, _ = precision_recall_curves(y_true, y_predicted)
return auc(recall, precision)
def precision_recall_loss(y_true, y_predicted):
"convenience function to minimize for"
return 1-average_precision_scores(y_true, y_predicted)
def last_axis_precision_recall_curve(y_true, y_predicted):
fps, tps, thresholds = _last_axis_binary_clf_curve(y_true, y_predicted)
i = [slice(None)] * fps.ndim
i[-1] = [-1]
precision = tps.astype('float32') / (tps + fps)
recall = tps.astype('float32') / tps[i]
i[-1] = slice(None, None, -1)
precision = precision[i]
recall = recall[i]
thresholds = thresholds[i]
i[-1] = [-1]
precision = np.concatenate([precision, np.ones(precision.shape)[i]], axis=-1)
recall = np.concatenate([recall, np.zeros(recall.shape)[i]], axis=-1)
return precision, recall, thresholds
#aliases
roc_curve = roc_curves
roc_auc_score = roc_auc_scores
precision_recall_curve = precision_recall_curves
average_precision_score = average_precision_scores
_binary_clf_curve = _binary_clf_curves
| mit |
cpatrickalves/simprev | modelos/modulos_fazenda/depesas.py | 1 | 20278 | # -*- coding: utf-8 -*-
"""
@author: Patrick Alves
"""
from util.tabelas import LerTabelas
import pandas as pd
# Computes expenditure on benefits
# Based on the equations of the 2018 LDO and the MF spreadsheets
def calc_despesas(despesas, estoques, concessoes, valCoBen, salarios, valMedBenef, probabilidades, nparcelas, resultados, parametros):
periodo = parametros['periodo']
    # Object created to use the functions of the LerTabelas class
dados = LerTabelas()
ult_ano_estoq = periodo[0]-1 # 2014
    ##### Computes expenditure using the known data (2011-2014)
    # The value in the database is monthly
for beneficio in despesas.keys():
# Auxílio doença para os que recebem Acima do Piso
if 'AuxdUrbAcim' in beneficio:
despesas[beneficio] = valCoBen[beneficio] * nparcelas[beneficio]
# Aposentadorias e Pensões para quem recebe acima do piso
elif 'Acim' in beneficio:
desp_dez = despesas[beneficio] # despesas dos mes de dezembro
despesas[beneficio] = desp_dez * nparcelas[beneficio]
# Demais auxílios
elif 'Aux' in beneficio:
qtd_benef = 0
if 'Auxd' in beneficio:
qtd_benef = concessoes[beneficio][ult_ano_estoq]
else:
qtd_benef = estoques[beneficio][ult_ano_estoq]
            # NOTE: For the accident benefit (Auxílio-acidente) the rule is 50% of the "benefit salary" value
            # source: http://www.previdencia.gov.br/servicos-ao-cidadao/informacoes-gerais/valor-beneficios-incapacidade/
if 'Auxa' in beneficio:
valor_benef = salarios['salarioMinimo'][ult_ano_estoq] * 0.5
else:
valor_benef = salarios['salarioMinimo'][ult_ano_estoq]
npar = nparcelas[beneficio][ult_ano_estoq]
# Calcula a despesa para cada benefício
despesas[beneficio][ult_ano_estoq] = qtd_benef * valor_benef * npar
# Demais tipos
else:
estoq_total = estoques[beneficio][ult_ano_estoq]
estoq_total_ano_ant = estoques[beneficio][ult_ano_estoq-1]
valor_benef = salarios['salarioMinimo'][ult_ano_estoq]
npar = nparcelas[beneficio][ult_ano_estoq]
estoq_medio = ((estoq_total + estoq_total_ano_ant)/2)
            # Computes the expenditure for each benefit (Eq. 44)
despesas[beneficio][ult_ano_estoq] = estoq_medio * valor_benef * npar
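            # Added note: Eq. 44 sets the expenditure to the average benefit stock
            # (mean of this year's and last year's stock) times the benefit value
            # times the number of payments in the year.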
    ##### Computes expenditures for the Rural, Urban and social-assistance clienteles that receive the floor (1 minimum wage) #####
for clientela in ['Rur', 'Piso', 'Rmv', 'Loas']:
beneficios = dados.get_id_beneficios(clientela)
for beneficio in beneficios:
# Pula o SalMat pois o calculo é diferente e feito posteriormente
if 'SalMat' in beneficio:
continue
# Verifica se existe estoque para o beneficio
if beneficio in estoques:
for ano in periodo:
# verifica se existe projeção para esse ano
if ano in estoques[beneficio].columns:
# Calculo para Auxílios
if 'Aux' in beneficio:
qtd_benef = estoques[beneficio][ano]
valor_benef = salarios['salarioMinimo'][ano]
                        # NOTE: For the accident benefit (Auxílio-acidente) the rule is 50% of the "benefit salary" value
                        # source: http://www.previdencia.gov.br/servicos-ao-cidadao/informacoes-gerais/valor-beneficios-incapacidade/
if 'Auxa' in beneficio:
valor_benef = salarios['salarioMinimo'][ano] * 0.5
npar = nparcelas[beneficio][ano]
# Calcula a despesa para cada benefício
despesas[beneficio][ano] = qtd_benef * valor_benef * npar
# Cálculo para os demais
else:
# Obtem o estoques do ano e do ano anterior
estoq_total = estoques[beneficio][ano]
estoq_total_ano_ant = estoques[beneficio][ano-1]
valor_benef = salarios['salarioMinimo'][ano]
npar = nparcelas[beneficio][ano]
# Calcula a despesa para cada benefício (Eq. 44)
despesas[beneficio][ano] = ((estoq_total + estoq_total_ano_ant)/2) * valor_benef * npar
    ##### Computes expenditures for the Urban clientele that receives above the floor #####
for beneficio in dados.get_id_beneficios('Acim'):
# Pula o SalMat pois o calculo é diferente e feito posteriormente
if 'SalMat' in beneficio:
continue
# Verifica se existem estoques
if beneficio in estoques:
sexo = beneficio[-1]
# Caso o benefício seja uma Apos. por Tempo de
# Contribuição Normal, Professor ou Especial
# Eq. 49 e 50
#if ('tcn' in beneficio or 'tce' in beneficio or 'tcp' in beneficio):
#fator_prev = 1
#ajuste = 1
#val_med_novos_ben = fator_prev * ajuste * salarios['SalMedSegUrbAcimPnad'+sexo]
for ano in periodo:
if ano in estoques[beneficio].columns: # verifica se existe projeção para esse ano
# Cálculo das despesas com os Auxílios
if 'Aux' in beneficio:
est_ano = estoques[beneficio][ano]
vmb = valMedBenef[beneficio][ano]
npar = nparcelas[beneficio][ano]
# Eq. 46
despesas[beneficio][ano] = est_ano * vmb * npar
else:
# Cálculo para Aposentadorias e Pensões
val_med_novos_ben = valMedBenef[beneficio]
# Idade de 1 a 90 anos
for idade in range(1,91):
# Para a idade de 90 anos
if idade == 90:
desp_anterior = despesas[beneficio][ano-1][idade-1] + despesas[beneficio][ano-1][idade]
else:
desp_anterior = despesas[beneficio][ano-1][idade-1]
conc_anterior = concessoes[beneficio][ano-1][idade-1]
                            # NOTE: I believe the correct approach would be to use the insured population rather than PopOcup
                            # NOTE: rend_med_ocup_ant already includes the replacement rate from Eq. 45
valor_med_conc_ant = val_med_novos_ben[ano-1][idade-1]
npar = nparcelas[beneficio][ano]
npar_ant = nparcelas[beneficio][ano-1]
prob_morte = probabilidades['Mort'+sexo][ano][idade]
fam = probabilidades['fam'+beneficio][ano][idade]
                            # In the spreadsheets the term "Atualização Monetária" (monetary indexation) is used
reajuste = parametros['tx_reajuste_beneficios'][ano]
novas_conc = concessoes[beneficio][ano][idade]
valor_med_conc = val_med_novos_ben[ano][idade]
# Eq. 45
part1 = desp_anterior + conc_anterior * valor_med_conc_ant * (npar_ant/2)
part2 = (1 - prob_morte * fam) * (1 + reajuste/100)
part3 = (novas_conc * valor_med_conc * (npar/2))
despesas[beneficio].loc[idade, ano] = part1 * part2 + part3
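                            # Added note: Eq. 45 carries forward last year's expenditure
                            # (plus half a year of last year's new concessions), discounts
                            # it by mortality and the family factor, applies the benefit
                            # indexation, and adds half a year of this year's new concessions.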
# Idade zero
novas_conc = concessoes[beneficio][ano][0]
valor_med_conc = val_med_novos_ben[ano][0]
npar = nparcelas[beneficio][ano]
despesas[beneficio].loc[0, ano] = novas_conc * valor_med_conc * (npar/2)
    ##### Computes expenditures for Maternity Pay (Salário Maternidade) #####
for beneficio in dados.get_id_beneficios('SalMat'):
# 2014-2060
anos = [periodo[0]-1] + periodo
# Verifica se existe estoque para o beneficio
if beneficio in estoques:
# Objeto do tipo Series que armazena as despesas acumuladas por ano
desp_acumulada = pd.Series(0.0, index=anos)
# Obtem o estoques acumulados do ano atual
for ano in anos:
estoq_total = estoques[beneficio][ano]
# se a clientela for UrbAcim
if 'Acim' in beneficio:
valor_benef = valMedBenef['SalMatUrbAcimM'][ano]
else:
valor_benef = salarios['salarioMinimo'][ano]
npar = nparcelas[beneficio][ano]
                # NOTE: The LDO does not describe the equation for computing SalMat expenditures
desp_acumulada[ano] = estoq_total * valor_benef * npar
# Salva no DataFrame
despesas[beneficio] = desp_acumulada
    ##### Computes the total expenditure #####
    anos = [periodo[0]-1] + periodo #2014-2060
    desp_total = pd.Series(0.0, index=anos) # Series object that stores the total expenditure
    desp_total_urb = pd.Series(0.0, index=anos) # Total urban expenditure
    desp_total_rur = pd.Series(0.0, index=anos) # Total rural expenditure
for ano in anos:
for beneficio in despesas.keys():
# O objeto que armazena as despesas com Salário Maternidade é diferente
if 'SalMat' in beneficio:
if ano in despesas[beneficio].index: # verifica se existe projeção para esse ano
if 'Urb' in beneficio: # Separa despesa Urbana e Rural
desp_total_urb[ano] += despesas[beneficio][ano]
else:
desp_total_rur[ano] += despesas[beneficio][ano]
else:
# Calculo para os demais benefícios
if ano in despesas[beneficio].columns: # verifica se existe projeção para esse ano
if 'Urb' in beneficio: # Separa despesa Urbana e Rural
desp_total_urb[ano] += despesas[beneficio][ano].sum()
else:
desp_total_rur[ano] += despesas[beneficio][ano].sum()
desp_total = desp_total_urb + desp_total_rur
    # Computes the growth rate of the expenditure
tx_cres_desp = pd.Series(0.0, index=periodo)
for ano in periodo: # pula o primeiro ano
tx_cres_desp[ano] = desp_total[ano]/desp_total[ano-1] - 1
resultados['despesas'] = desp_total
resultados['despesas_urb'] = desp_total_urb
resultados['despesas_rur'] = desp_total_rur
resultados['tx_cres_despesa'] = tx_cres_desp
return resultados
# Computes the number of installments paid per year for each benefit
# A description of the calculation exists in section 4.6 of the LDO (page 43)
# However, the total values and expenditures per benefit are needed to perform that calculation;
# since we only have data for the month of December, the values were fixed manually
# NOTE: Values obtained from the MF spreadsheets
def calc_n_parcelas(estoques, despesa, valMedBenef, periodo):
# ano_estoq = periodo[0]-1 # 2014
dados = LerTabelas()
    # Dictionary that stores the average number of installments for each benefit type
n_parcelas = {}
# 2014
ano_estoque = periodo[0]-1
# 2014-2060
anos = [ano_estoque] + periodo
# Aposentadorias Idade Normal
for benef in dados.get_id_beneficios('Apin'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.95 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.82 # 2016-2060
else:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.7 # 2014-2015
n_parcelas[benef].loc[periodo[1]:] = 12.95 # 2016-2060
# Aposentadorias TC Normal
for benef in dados.get_id_beneficios('Atcn'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]:] = 12.92 # 2015-2060
else:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 11.7 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.0 # 2016-2060
# Aposentadorias Idade Deficiente
for benef in dados.get_id_beneficios('Apid'):
n_parcelas[benef] = pd.Series(13.0, index=anos) # 2014-2060
# Aposentadorias TC Professor
for benef in dados.get_id_beneficios('Atcp'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(13.0, index=anos) # 2014-2060
else:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 13.46 # 2015
n_parcelas[benef].loc[periodo[1]:] = 14.5 # 2016-2060
# Aposentadorias Invalidez
for benef in dados.get_id_beneficios('Ainv'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 13.09 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.96 # 2016-2060
else:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.3 # 2015
n_parcelas[benef].loc[periodo[1]:] = 11.9 # 2016-2060
# Aposentadorias TC especial
for benef in dados.get_id_beneficios('Atce'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(13.0, index=anos) # 2014-2060
else:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.5 # 2015
n_parcelas[benef].loc[periodo[1]:] = 13.6 # 2016-2060
# Aposentadorias TC Deficiente
for benef in dados.get_id_beneficios('Atcd'):
n_parcelas[benef] = pd.Series(13.0, index=anos) # 2014-2060
# Pensões
for benef in dados.get_id_beneficios('Pe'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.97 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.89 # 2016-2060
else:
n_parcelas[benef] = pd.Series(13.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.70 # 2015
n_parcelas[benef].loc[periodo[1]:] = 13.10 # 2016-2060
# Auxilios Doença
for benef in dados.get_id_beneficios('Auxd'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 11.83 # 2015-2015
n_parcelas[benef].loc[periodo[1]:] = 13.32 # 2016-2060
else:
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 8.33 # 2015
n_parcelas[benef].loc[periodo[1]:] = 9.01 # 2016-2060
# Auxilios Acidente
for benef in dados.get_id_beneficios('Auxa'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.99 # 2015
n_parcelas[benef].loc[periodo[1]:] = 13.46 # 2016-2060
else:
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.43 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.56 # 2016-2060
# Auxilios Reclusão
for benef in dados.get_id_beneficios('Auxr'):
# Rurais e Urbanos tem valores diferentes
if 'Rur' in benef:
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.06 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.18 # 2016-2060
else:
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.31 # 2015
n_parcelas[benef].loc[periodo[1]:] = 14.03 # 2016-2060
# Salario Maternidade
for benef in dados.get_id_beneficios('SalMat'):
n_parcelas[benef] = pd.Series(4.0, index=anos) # 2014-2060
# Assistenciais LoasDef
for benef in dados.get_id_beneficios('LoasDef'):
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.05 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.00 # 2016-2060
# Assistenciais LoasIdo
for benef in dados.get_id_beneficios('LoasIdo'):
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 11.96 # 2015
n_parcelas[benef].loc[periodo[1]:] = 11.73 # 2016-2060
# Assistenciais RMV
for benef in dados.get_id_beneficios('Rmv'):
n_parcelas[benef] = pd.Series(12.0, index=anos)
n_parcelas[benef].loc[periodo[0]] = 12.09 # 2015
n_parcelas[benef].loc[periodo[1]:] = 12.06 # 2016-2060
# for beneficio in estoques.keys():
# # Verifica se existe dados de despesa para o beneficio
# if beneficio in despesa.keys():
# desp = despesa[beneficio][ano_estoq].sum()
# est = estoques[beneficio][ano_estoq].sum()
# vm = valMedBenef[beneficio][ano_estoq].mean()
# n_parcelas[beneficio] = Dt/(vm*est)
return n_parcelas
| gpl-3.0 |
JoeriHermans/dist-keras | examples/kafka_producer.py | 3 | 1754 | """
This example will be used as a Kafka producer to generate dummy
data for our Spark Streaming example.
"""
## BEGIN Imports. ##############################################################
from kafka import *
import sys
import pandas
import time
import json
## END Imports. ################################################################
def usage():
print("Distributed Keras Example: Kafka Producer")
print("")
print("Usage:")
print("python kafka_producer.py [bootstrap_server]")
exit(0)
def allocate_producer(bootstrap_server):
producer = KafkaProducer(bootstrap_servers=[bootstrap_server])
return producer
def read_data():
path = 'data/atlas_higgs.csv'
data = []
# Use Pandas to infer the types.
data = pandas.read_csv(path)
# Remove the unneeded columns.
del data['Label']
del data['Weight']
# Convert the data to a list of dictionaries.
data = data.transpose().to_dict().values()
return data
def produce(producer, topic, data):
for row in data:
producer.send(topic, json.dumps(row))
def main():
# Check if the required number of arguments has been specified.
if len(sys.argv) != 2:
usage()
# Fetch the bootstrap server from the arguments.
bootstrap_server = sys.argv[1]
# Allocate the producer.
producer = allocate_producer(bootstrap_server)
# Read the data from the CSV file.
data = read_data()
iteration = 1
    # Transmit the data in a continuous loop while waiting for 5 seconds after every iteration.
while True:
print("Iteration " + str(iteration) + ".")
produce(producer, 'Machine_Learning', data)
iteration += 1
time.sleep(5)
if __name__ == "__main__":
main()
| gpl-3.0 |
themech/Machine-Learning-Coursera-Tensorflow | ex3-multi-class-classification/2_neural_networks.py | 1 | 1500 | # This classifies the same digit as the logistic regression classifiers from
# the first step. But here we're using a pre-trained neural network classifier
# (loaded from data/ex3weights.mat)
import numpy as np
from scipy import io
from sklearn import metrics
# Load the data.
filename = 'data/ex3data1.mat'
data = io.loadmat(filename)
X_data, Y_data = data['X'], data['y']
numSamples = X_data.shape[0]
# Add a 'constant' to each of the rows.
X_data = np.insert(X_data, 0, 1, axis=1)
print("X_data shape ", X_data.shape, ", Y_data shape", Y_data.shape)
# Load the pre-trained network.
weights = io.loadmat('data/ex3weights.mat')
theta1, theta2 = weights['Theta1'], weights['Theta2']
print("Theta1 shape", theta1.shape, ", theta2 shape", theta2.shape)
# Classify the input data using the pre-trained network/
a1 = X_data
z2 = np.matmul(a1, np.transpose(theta1)) # (5000,401) @ (25,401).T = (5000,25)
print("z2 shape", z2.shape)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))
a2 = sigmoid(z2)
# Add the bias unit after applying the sigmoid so that it is exactly 1
# (inserting it into z2 first would make the bias column sigmoid(1) instead).
a2 = np.insert(a2, 0, values=np.ones(a2.shape[0]), axis=1)
print("a2 shape", a2.shape) # (5000, 26)
z3 = np.matmul(a2, np.transpose(theta2))
print("z3 shape", z3.shape) # (5000, 10)
a3 = sigmoid(z3)
# Numpy is 0 base index. We add +1 to make it compatible with matlab (so we can
# compare y_pred with the correct answers from Y_data).
y_pred = np.argmax(a3, axis=1) + 1
print("y_pred shape", y_pred.shape) # (5000,)
# Print the report
print(metrics.classification_report(Y_data, y_pred))
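# Added sanity check (sketch): overall accuracy can also be computed directly
# from the predictions, e.g.
#   print("accuracy", np.mean(y_pred == Y_data.ravel()))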
| mit |
dkandalov/katas | python/ml/scikit/plot_iris.py | 1 | 1811 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
iris = datasets.load_iris()
# we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# We create an instance of SVM and fit out data.
# We do not scale our data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
linear_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, linear_svc, rbf_svc, poly_svc)):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
| unlicense |
lifei96/Medium-crawler-with-data-analyzer | User_Crawler/medium_users_data_analyzer.py | 2 | 6117 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import datetime
import os
def users_data_parser():
if not os.path.exists('./result'):
os.mkdir('./result')
file_in = open('./suspended_username_list.txt', 'r')
suspended_username_list = str(file_in.read()).split(' ')
file_in.close()
users_data = pd.read_csv('./result/users_raw_data.csv', sep='\t', encoding='utf-8')
users_data['last_post_date'] = pd.to_datetime(users_data['last_post_date'], errors='coerce')
users_data['reg_date'] = pd.to_datetime(users_data['reg_date'], errors='coerce')
mask = (users_data['reg_date'] >= datetime.datetime(2013, 1, 1)) & (users_data['reg_date'] <= datetime.datetime(2016, 6, 30))
users_data = users_data.loc[mask]
mask = users_data['username'].isin(suspended_username_list)
suspended_users_data = users_data.loc[mask]
twitter_data = pd.read_csv('./result/twitter.csv', sep='\t', encoding='utf-8')
f_f_list = np.sort(((users_data['following_count'] + 0.1) / (users_data['followers_count'] + 0.1)).tolist())
f_f_list2 = np.sort(((twitter_data['following_count'] + 0.1) / (twitter_data['followers_count'] + 0.1)).tolist())
t_f_f_list = np.sort(((twitter_data['t_following_count'] + 0.1) / (twitter_data['t_followers_count'] + 0.1)).tolist())
s_f_f_list = np.sort(((suspended_users_data['following_count'] + 0.1) / (suspended_users_data['followers_count'] + 0.1)).tolist())
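    # Added note: plotting each sorted list against np.linspace(0, 1, n) below
    # yields an empirical CDF, i.e. the y-value is the fraction of users whose
    # following/followers ratio is at most the corresponding x-value.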
plt.figure(figsize=(15, 10))
plt.axis([0.01, 1000, 0, 1])
ax = plt.gca()
ax.set_autoscale_on(False)
plt.xlabel('(following+0.1)/(followers+0.1)')
plt.ylabel('CDF')
plt.yticks(np.linspace(0, 1, 21))
plt.grid()
plt.title('Balance')
line1, = plt.semilogx(f_f_list, np.linspace(0, 1, f_f_list.size), '-g')
line2, = plt.semilogx(f_f_list2, np.linspace(0, 1, f_f_list2.size), '-r')
line3, = plt.semilogx(t_f_f_list, np.linspace(0, 1, t_f_f_list.size), '-b')
line4, = plt.semilogx(s_f_f_list, np.linspace(0, 1, s_f_f_list.size), '--g')
plt.legend((line1, line2, line3, line4), ("all Medium users", "Medium users connected to Twitter", "Twitter users", "Medium users whose Twitter are suspended"), loc=2)
plt.savefig('./result/CDF_balance.png')
plt.close()
reciprocity_data = pd.read_csv('./result/reciprocity.csv', sep='\t', encoding='utf-8')
reciprocity_list = np.sort(((reciprocity_data['reciprocity_count'] + 0.0000000001) / (reciprocity_data['following_count'] + 0.0000000001)).tolist())
plt.figure(figsize=(10, 10))
ax = plt.gca()
ax.set_autoscale_on(False)
plt.xlabel('friends/following')
plt.ylabel('CDF')
plt.yticks(np.linspace(0, 1, 21))
plt.xticks(np.linspace(0, 1, 21))
plt.grid()
plt.title('Reciprocity')
plt.plot(reciprocity_list, np.linspace(0, 1, reciprocity_list.size), label='Reciprocity')
plt.savefig('./result/CDF_reciprocity.png')
plt.close()
f_f_list = np.sort((users_data['following_count']).tolist())
s_f_f_list = np.sort((suspended_users_data['following_count']).tolist())
plt.figure(figsize=(15, 10))
plt.axis([1, 1000, 0, 1])
ax = plt.gca()
ax.set_autoscale_on(False)
plt.xlabel('following')
plt.ylabel('CDF')
plt.grid()
plt.title('CDF_following')
line1, = plt.semilogx(f_f_list, np.linspace(0, 1, f_f_list.size), '-g')
line2, = plt.semilogx(s_f_f_list, np.linspace(0, 1, s_f_f_list.size), '-b')
plt.legend((line1, line2), ("all Medium users", "Medium users whose Twitter are suspended"), loc=4)
plt.savefig('./result/CDF_following.png')
plt.close()
f_f_list = np.sort((users_data['followers_count']).tolist())
s_f_f_list = np.sort((suspended_users_data['followers_count']).tolist())
plt.figure(figsize=(15, 10))
plt.axis([1, 2000, 0, 1])
ax = plt.gca()
ax.set_autoscale_on(False)
plt.xlabel('followers')
plt.ylabel('CDF')
plt.grid()
plt.title('CDF_followers')
line1, = plt.semilogx(f_f_list, np.linspace(0, 1, f_f_list.size), '-g')
line2, = plt.semilogx(s_f_f_list, np.linspace(0, 1, s_f_f_list.size), '-b')
plt.legend((line1, line2), ("all Medium users", "Medium users whose Twitter are suspended"), loc=4)
plt.savefig('./result/CDF_followers.png')
plt.close()
f_f_list = np.sort((users_data['posts_count']).tolist())
s_f_f_list = np.sort((suspended_users_data['posts_count']).tolist())
plt.figure(figsize=(15, 10))
plt.axis([1, 50, 0, 1])
ax = plt.gca()
ax.set_autoscale_on(False)
plt.xlabel('posts')
plt.ylabel('CDF')
plt.grid()
plt.title('CDF_posts')
line1, = plt.semilogx(f_f_list, np.linspace(0, 1, f_f_list.size), '-g')
line2, = plt.semilogx(s_f_f_list, np.linspace(0, 1, s_f_f_list.size), '-b')
plt.legend((line1, line2), ("all Medium users", "Medium users whose Twitter are suspended"), loc=4)
plt.savefig('./result/CDF_posts.png')
plt.close()
mean_median_list = [[users_data['following_count'].mean(), suspended_users_data['following_count'].mean()],
[users_data['following_count'].median(), suspended_users_data['following_count'].median()],
[users_data['followers_count'].mean(), suspended_users_data['followers_count'].mean()],
[users_data['followers_count'].median(), suspended_users_data['followers_count'].median()],
[users_data['posts_count'].mean(), suspended_users_data['posts_count'].mean()],
[users_data['posts_count'].median(), suspended_users_data['posts_count'].median()]]
mean_median = pd.DataFrame(mean_median_list, columns=['All users', 'Suspended users'])
ax = mean_median.plot.bar(figsize=(15, 10), fontsize=16)
ax.set_xticks(mean_median.index)
ax.set_xticklabels(['following_mean', 'following_median', 'followers_mean', 'followers_median', 'posts_mean', 'posts_median'], rotation=0)
plt.savefig('./result/mean_median.png')
plt.close()
if __name__ == '__main__':
users_data_parser()
| mit |
Martinfx/yodaqa | data/ml/fbpath/evaluate_queries_results.py | 1 | 9997 | #!/usr/bin/python -u
#
# Evaluate fbpath-based query performance (on gold standard and as predicted)
#
# Usage: evaluate_queries_results.py traindata.json valdata.json
#
# A model is trained on traindata and then its performance is measured
# on valdata. (FIXME: Duplicate code with fbpath_train_logistic, instead
# of reusing the already-trained model.)
#
# The json data can be generated using:
#
# mkdir -p data/ml/fbpath/wq-fbpath
# cd ../dataset-factoid-webquestions
# for i in trainmodel val devtest; do
# scripts/fulldata.py $i ../yodaqa/data/ml/fbpath/wq-fbpath/ main/ d-dump/ d-freebase-mids/ d-freebase-brp/
# done
#
# Example: data/ml/fbpath/evaluate_queries_results.py data/ml/fbpath/wq-fbpath/trainmodel.json data/ml/fbpath/wq-fbpath/val.json
#
# For every question, the script prints its qId, whether all answers were found
# using gold standard fbpaths, whether any answer was found using gold standard
# fbpaths, whether all answers were found using predicted fbpaths and whether
# any answer was found using predicted fbpaths.
#
# At the end of the script, it prints the number of questions, the percentage of
# questions for which all/any answers were obtained from Freebase using gold
# standard/predicted fbpaths, plus the number of questions which could not
# be answered because SPARQLWrapper does not support long queries.
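#
# Each per-question output line therefore looks like (illustrative values only):
#   qID <qId>, all: True, all from predicted: False, any: True, any from predicted: True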
from SPARQLWrapper import SPARQLWrapper, JSON
import json, sys
from fbpathtrain import VectorizedData
import random, time
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
import numpy as np
from urllib2 import HTTPError
URL = 'http://yodaqa.felk.cvut.cz/fuseki-dbp/dbpedia/query'
def check_q(cfier, v, i):
probs = cfier.predict_proba(v.X.toarray()[i])[0]
top_probs = sorted(enumerate(probs), key=lambda k: k[1], reverse=True)
top_lprobs = ['%s: %.3f' % (v.Ydict.classes_[k[0]], k[1]) for k in top_probs[:15]]
return (sorted(v.Xdict.inverse_transform(v.X[i])[0].keys(), key=lambda s: reversed(s)),
v.Ydict.inverse_transform(cfier.predict(v.X.toarray()[i]))[0],
top_lprobs,
v.Ydict.inverse_transform(np.array([v.Y[i]]))[0])
def generate_query(paths, mid, proba, concepts):
pathQueries = []
for path in paths:
path = [p[1:].replace("/",".") for p in path]
if (len(path) == 1):
pathQueryStr = "{" \
" ns:" + mid + " ns:" + path[0] + " ?val .\n" \
" BIND(\"ns:" + path[0] + "\" AS ?prop)\n" \
" BIND(" + proba + " AS ?score)\n" \
" BIND(0 AS ?branched)\n" \
" BIND(ns:" + mid + " AS ?res)\n" \
" OPTIONAL {\n" \
" ns:" + path[0] + " rdfs:label ?proplabel .\n" \
" FILTER(LANGMATCHES(LANG(?proplabel), \"en\"))\n" \
" }\n" \
"}"
pathQueries.append(pathQueryStr);
elif (len(path) == 2):
pathQueryStr = "{" \
" ns:" + mid + " ns:" + path[0] + "/ns:" + path[1] + " ?val .\n" \
" BIND(\"ns:" + path[0] + "/ns:" + path[1] + "\" AS ?prop)\n" \
" BIND(" + proba + " AS ?score)\n" \
" BIND(0 AS ?branched)\n" \
" BIND(ns:" + mid + " AS ?res)\n" \
" OPTIONAL {\n" \
" ns:" + path[0] + " rdfs:label ?pl0 .\n" \
" ns:" + path[1] + " rdfs:label ?pl1 .\n" \
" FILTER(LANGMATCHES(LANG(?pl0), \"en\"))\n" \
" FILTER(LANGMATCHES(LANG(?pl1), \"en\"))\n" \
" BIND(CONCAT(?pl0, \": \", ?pl1) AS ?proplabel)\n" \
" }\n" \
"}"
pathQueries.append(pathQueryStr);
elif (len(path) == 3):
for concept in concepts:
witnessRel = path[2];
quotedTitle = concept['fullLabel'].replace("\"", "").replace("\\\\", "").replace("\n", " ")
pathQueryStr = "{" \
" ns:" + mid + " ns:" + path[0] + " ?med .\n" \
" ?med ns:" + path[1] + " ?val .\n" \
" {\n" \
" ?med ns:" + witnessRel + " ?concept .\n" \
" ?concept <http://rdf.freebase.com/key/wikipedia.en_id> \"" + concept['pageID'] + "\" .\n" \
" } UNION {\n" \
" {\n" \
" ?med ns:" + witnessRel + " ?wlabel .\n" \
" FILTER(!ISURI(?wlabel))\n" \
" } UNION {\n" \
" ?med ns:" + witnessRel + " ?concept .\n" \
" ?concept rdfs:label ?wlabel .\n" \
" }\n" \
" FILTER(LANGMATCHES(LANG(?wlabel), \"en\"))\n" \
" FILTER(CONTAINS(LCASE(?wlabel), LCASE(\"" + quotedTitle + "\")))\n" \
" }\n" \
" BIND(\"ns:" + path[0] + "/ns:" + path[1] + "\" AS ?prop)\n" \
" BIND(" + proba + " AS ?score)\n" \
" BIND(1 AS ?branched)\n" \
" BIND(ns:" + mid + " AS ?res)\n" \
" OPTIONAL {\n" \
" ns:" + path[0] + " rdfs:label ?pl0 .\n" \
" ns:" + path[1] + " rdfs:label ?pl1 .\n" \
" FILTER(LANGMATCHES(LANG(?pl0), \"en\"))\n" \
" FILTER(LANGMATCHES(LANG(?pl1), \"en\"))\n" \
" BIND(CONCAT(?pl0, \": \", ?pl1) AS ?proplabel)\n" \
" }\n" \
"}"
pathQueries.append(pathQueryStr)
return pathQueries
def generate_results(paths, mids, concepts):
prefix = """PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX ns: <http://rdf.freebase.com/ns/>
SELECT ?property ?value ?prop ?val ?res ?score ?branched ?witnessAF WHERE {"""
postfix = """BIND( IF(BOUND(?proplabel), ?proplabel, ?prop) AS ?property )
OPTIONAL {
?val rdfs:label ?vallabel .
FILTER( LANGMATCHES(LANG(?vallabel), "en") )
}
BIND( IF(BOUND(?vallabel), ?vallabel, ?val) AS ?value )
FILTER( !ISURI(?value) )
FILTER( LANG(?value) = "" || LANGMATCHES(LANG(?value), "en") )
}LIMIT 400"""
results = []
for m in mids:
tmp = generate_query(paths, m, "1", concepts)
if (len(tmp) == 0):
return []
sparql = SPARQLWrapper(URL)
sparql.setReturnFormat(JSON)
query = prefix + " UNION ".join(tmp) + postfix
# print(query)
sparql.setQuery(query)
res = sparql.query().convert()
# print("")
# print(res)
results += list(set([r['value']['value'] for r in res['results']['bindings']]))
return results
def mid_by_pageid(pageID):
query = '''PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX ns: <http://rdf.freebase.com/ns/>
SELECT * WHERE {
?topic <http://rdf.freebase.com/key/wikipedia.en_id> "''' + pageID + '''" .
?topic rdfs:label ?label .
FILTER( LANGMATCHES(LANG(?label), "en") )
}'''
sparql = SPARQLWrapper(URL)
sparql.setReturnFormat(JSON)
sparql.setQuery(query)
res = sparql.query().convert()
ret = []
for r in res['results']['bindings']:
ret.append(r['topic']['value'][27:])
if (ret == []):
return ""
return ret[0]
if __name__ == '__main__':
with open(sys.argv[1], 'r') as f:
traindata = VectorizedData(json.load(f))
print('// traindata: %d questions, %d features, %d fbpaths' % (
np.size(traindata.X, axis=0), np.size(traindata.X, axis=1), np.size(traindata.Y, axis=1)))
sys.stdout.flush()
t_start = time.clock()
cfier = OneVsRestClassifier(LogisticRegression(penalty='l1'), n_jobs=4)
cfier.fit(traindata.X, traindata.Y)
with open(sys.argv[2]) as f:
full = json.load(f)
full_data = VectorizedData(full, traindata.Xdict, traindata.Ydict)
error = 0
anyCnt = 0
allCnt = 0
anyPCnt = 0
allPCnt = 0
for i, line in enumerate(full):
concepts = line['Concept']
mids = [c["mid"] for c in line['freebaseMids']]
relpaths = [c[0] for c in line['relPaths']]
mids_from_pageids = [mid_by_pageid(c['pageID']) for c in line['Concept']]
        mids_from_pageids = [m for m in mids_from_pageids if m != ""]
predicted_paths = [lab.split(":")[0].split("|") for lab in check_q(cfier, full_data, i)[2]]
# print(predicted_paths)
try:
results = generate_results(relpaths, mids, concepts)
predicted_results = generate_results(predicted_paths, mids_from_pageids, concepts)
except HTTPError:
error += 1
continue
# print(results)
allAnswers = True
allAnswersPredicted = True
anyAnswers = False
anyAnswersPredicted = False
for a in line["answers"]:
if (a in results):
anyAnswers = True
else:
allAnswers = False
if (a in predicted_results):
anyAnswersPredicted = True
else:
allAnswersPredicted = False
if (anyAnswers):
anyCnt += 1
if (anyAnswersPredicted):
anyPCnt += 1
if (allAnswersPredicted):
allPCnt += 1
if (allAnswers):
allCnt += 1
print("qID %s, all: %s, all form predicted: %s, any: %s, any form predicted: %s" % (line['qId'], allAnswers, allAnswersPredicted, anyAnswers, anyAnswersPredicted))
print("SUMARRY")
print("Number of questions: %d, all: %f, all predicted: %f, any: %f, any predicted: %f, http error: %d" %
(len(full), (1.0*allCnt)/len(full), (1.0*allPCnt)/len(full), (1.0*anyCnt)/len(full), (1.0*anyPCnt)/len(full), error))
| apache-2.0 |
wzbozon/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
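# Illustrative note (not part of the original example): with ``weights=[2, 1, 2]``
# the soft vote for a class c is the weighted average of the per-classifier
# probabilities, i.e.
#     p(c) = (2 * p_dt(c) + 1 * p_knn(c) + 2 * p_svc(c)) / (2 + 1 + 2)
# so the tree and the SVC each contribute twice as much as the k-NN model.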
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
tgrammat/ML-Data_Challenges | Dato-tutorials/anomaly-detection/visualization_helper_functions.py | 3 | 11251 | # libraries required
import graphlab.aggregate as agg
from matplotlib import pyplot as plt
import seaborn as sns
def item_freq_plot(data_sf, item_column, hue=None, topk=None, pct_threshold=None ,reverse=False,
seaborn_style='whitegrid', seaborn_palette='deep', color='b', **kwargs):
'''Function for topk item frequency plot:
Parameters
----------
data_sf: SFrame
SFrame for plotting. If x and y are absent, this is interpreted as wide-form.
Otherwise it is expected to be long-form.
item_column: string
The attribute name the frequency counts of which we want to visualize
hue: seaborn barplot name of variable in vector data, optional
Inputs for plotting long-form data. See seaborn examples for interpretation.
topk: int, optional
The number of most frequent items
pct_threshold: float in [0,100] range, optional
Lower frequency limit below which all the grouby counted items will be ignored.
seaborn_style: dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
Set the aesthetic style of the plots through the seaborn module.
A dictionary of parameters or the name of a preconfigured set.
seaborn_palette: {deep, muted, pastel, dark, bright, colorblind}
Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like 'b' or 'g'
are interpreted by matplotlib in subsequent plots.
color: matplotlib color, optional
Color for all of the elements, or seed for light_palette()
when using hue nesting in seaborn.barplot().
kwargs : key, value mappings
        Other keyword arguments which are passed through to (a) the seaborn.barplot API
        and/or (b) plt.bar at draw time.
'''
# set seaborn style
sns.set(style=seaborn_style)
# compute the item counts: (1) apply groupby count operation,
# (2) check whether a nested grouping exist or not
if hue is not None:
item_counts = data_sf.groupby([item_column,hue], agg.COUNT())
hue_order = list(data_sf[hue].unique())
hue_length = len(hue_order)
else:
item_counts = data_sf.groupby(item_column, agg.COUNT())
hue_order=None
hue_length=1
# compute frequencies
pcts = (item_counts['Count'] / float(item_counts['Count'].sum())) * 100
item_counts['Percent'] = pcts
# apply a percentage threshold if any
if((pct_threshold is not None) & (pct_threshold < 100)):
item_counts = item_counts[item_counts['Percent'] >= pct_threshold]
    elif((pct_threshold is not None) & (pct_threshold >= 100)):
        print 'The frequency threshold was unacceptably high',\
            'and has been ignored.',\
            'If you want to use this flag please choose a value lower than 100.'
# print the number of remaining item counts
print 'Number of Unique Items: %d' % len(item_counts)
# determine the ysize per item
ysize_per_item = 0.5 * hue_length
# apply topk/sort operations
if((topk is not None) & (topk < len(item_counts))):
item_counts = item_counts.topk('Percent', k=topk, reverse=reverse)
ysize = ysize_per_item * topk
print 'Number of Most Frequent Items, Visualized: %d' % topk
else:
item_counts = item_counts.sort('Percent', ascending=False)
ysize = ysize_per_item * len(item_counts)
print 'Number of Most Frequent Items, Visualized: %d' % len(item_counts)
# transform the item_counts SFrame into a Pandas DataFrame
item_counts_df = item_counts.to_dataframe()
# initialize the matplotlib figure
ax = plt.figure(figsize=(7, ysize))
# plot the Freq Percentages of the topk Items
    ax = sns.barplot(x='Percent', y=item_column, hue=hue, data=item_counts_df,
                     order=list(item_counts_df[item_column]), hue_order=hue_order,
                     orient='h', color=color, palette=seaborn_palette, **kwargs)
# add informative axis labels
# make final plot adjustments
xmax = max(item_counts['Percent'])
ax.set(xlim=(0, xmax),
ylabel= item_column,
           xlabel='Most Frequent Items\n(% of total occurrences)')
if hue is not None:
ax.legend(ncol=hue_length, loc="lower right", frameon=True)
sns.despine(left=True, bottom=True)
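# Example usage (illustrative; the SFrame ``sf`` and its column names are hypothetical):
#     item_freq_plot(sf, 'category', hue='segment', topk=20, pct_threshold=0.5)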
def segments_countplot(data_sf, x=None, y=None, hue=None,
order=None, hue_order=None, figsize_tuple= None, title=None,
seaborn_style='whitegrid', seaborn_palette='deep', color='b',
**kwargs):
'''Function for fancy seaborn barplot:
Parameters
----------
data_sf: SFrame
SFrame for plotting. If x and y are absent, this is interpreted as wide-form.
Otherwise it is expected to be long-form.
x, y, hue: seaborn countplot names of variables in data or vector data, optional
Inputs for plotting long-form data. See examples for interpretation.
order, hue_order: seaborn countplot lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are inferred from the data objects.
figsize_tuple: tuple of integers, optional, default: None
width, height in inches. If not provided, defaults to rc figure.figsize.
title: string
Provides the countplot title.
seaborn_style: dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
Set the aesthetic style of the plots through the seaborn module.
A dictionary of parameters or the name of a preconfigured set.
seaborn_palette: {deep, muted, pastel, dark, bright, colorblind}
Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like 'b' or 'g'
are interpreted by matplotlib in subsequent plots.
color: matplotlib color, optional
Color for all of the elements, or seed for light_palette()
when using hue nesting in seaborn.barplot().
kwargs : key, value mappings
Other keyword arguments which are passed through (a)seaborn.countplot API
and/or (b)plt.bar at draw time.
'''
# define the plotting style
sns.set(style=seaborn_style)
# initialize the matplotlib figure
plt.figure(figsize=figsize_tuple)
# transform the SFrame into a Pandas DataFrame
data_df = data_sf.to_dataframe()
# plot the segments counts
ax = sns.countplot(x=x, y=y, hue=hue, data=data_df, order=order, hue_order=hue_order,
orient='v', palette=seaborn_palette, color=color, **kwargs)
# add informative axis labels, title
# make final plot adjustments
plt.title(title, {'fontweight': 'bold'})
sns.despine(left=True, bottom=True)
plt.show()
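# Example usage (illustrative; the SFrame ``sf`` and its column names are hypothetical):
#     segments_countplot(sf, x='segment', hue='churned',
#                        figsize_tuple=(12, 6), title='Counts per segment')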
def univariate_summary_statistics_plot(data_sf, attribs_list, nsubplots_inrow=3, subplots_wspace=0.5,
seaborn_style='whitegrid', seaborn_palette='deep', color='b',
**kwargs):
'''Function for fancy univariate summary plot:
Parameters
----------
data_sf: SFrame
SFrame of interest
attribs_list: list of strings
Provides the list of SFrame attributes the univariate plots of which we want to draw
nsubplots_inrow: int
Determines the desired number of subplots per row.
seaborn_style: dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
Set the aesthetic style of the plots through the seaborn module.
A dictionary of parameters or the name of a preconfigured set.
seaborn_palette: {deep, muted, pastel, dark, bright, colorblind}
Change how matplotlib color shorthands are interpreted.
Calling this will change how shorthand codes like 'b' or 'g'
are interpreted by matplotlib in subsequent plots.
color: matplotlib color, optional
Color for all of the elements, or seed for light_palette()
when using hue nesting in seaborn.barplot().
'''
import graphlab as gl
# transform the SFrame into a Pandas DataFrame
if isinstance(data_sf, gl.data_structures.sframe.SFrame):
data_df = data_sf.to_dataframe()
else:
data_df = data_sf
# define the plotting style
sns.set(style=seaborn_style)
# remove any offending attributes for a univariate summary statistics
# filtering function
def is_appropriate_attrib(attrib):
if(data_df[attrib].dtype != 'datetime64[ns]'):
return True
else:
return False
# apply the filtering function
attribs_list_before = attribs_list
attribs_list = list(filter(is_appropriate_attrib, attribs_list))
    xattribs_list = [attrib for attrib in attribs_list_before
                     if attrib not in attribs_list]
if(len(xattribs_list) !=0):
        print 'These attributes are not appropriate for univariate summary statistics',\
            'and have been removed from consideration:'
print xattribs_list, '\n'
# initialize the matplotlib figure
nattribs = len(attribs_list)
# compute the sublots nrows
nrows = ((nattribs-1)/nsubplots_inrow) + 1
# compute the subplots ncols
if(nattribs >= nsubplots_inrow):
ncols = nsubplots_inrow
else:
ncols = nattribs
# compute the subplots ysize
row_ysize = 9
ysize = nrows * row_ysize
# set figure dimensions
plt.rcParams['figure.figsize'] = (14, ysize)
#fig = plt.figure(figsize=(14, ysize))
# draw the relavant univariate plots for each attribute of interest
num_plot = 1
for attrib in attribs_list:
if(data_df[attrib].dtype == object):
plt.subplot(nrows, ncols, num_plot)
sns.countplot(y=attrib, data=data_df,
palette=seaborn_palette, color=color, **kwargs)
plt.xticks(rotation=45)
plt.ylabel(attrib, {'fontweight': 'bold'})
elif((data_df[attrib].dtype == float) | (data_df[attrib].dtype == int)):
plt.subplot(nrows, ncols, num_plot)
sns.boxplot(y=attrib, data=data_df,
palette=seaborn_palette, color=color, **kwargs)
plt.ylabel(attrib, {'fontweight': 'bold'})
num_plot +=1
# final plot adjustments
sns.despine(left=True, bottom=True)
if subplots_wspace < 0.2:
print 'Subplots White Space was less than default, 0.2.'
        print 'The default value is going to be used: \'subplots_wspace=0.2\''
subplots_wspace =0.2
plt.subplots_adjust(wspace=subplots_wspace)
plt.show()
# print the corresponding summary statistic
print '\n', 'Univariate Summary Statistics:\n'
summary = data_df[attribs_list].describe(include='all')
print summary
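# Example usage (illustrative; the SFrame ``sf`` and its attribute names are hypothetical):
#     univariate_summary_statistics_plot(sf, ['age', 'income', 'gender'],
#                                        nsubplots_inrow=3, subplots_wspace=0.5)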
def plot_time_series(timestamp, values, title, **kwargs):
plt.rcParams['figure.figsize'] = 14, 7
plt.plot_date(timestamp, values, fmt='g-', tz='utc', **kwargs)
plt.title(title)
plt.xlabel('Year')
plt.ylabel('Dollars per Barrel')
plt.rcParams.update({'font.size': 16})
| apache-2.0 |
lijiabogithub/QUANTAXIS | QUANTAXIS/QACmd/strategy_sample_simple.py | 1 | 1511 | # encoding: UTF-8
import QUANTAXIS as QA
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_data
from pymongo import MongoClient
from QUANTAXIS.QAUtil import QA_util_date_stamp,QA_util_log_info
from QUANTAXIS.QAMarket import QA_QAMarket_bid,QA_Market
from QUANTAXIS.QABacktest.QABacktest import QA_Backtest
from QUANTAXIS.QAARP import QAAccount,QAPortfolio,QARisk
from QUANTAXIS.QASignal import QA_signal_send
from QUANTAXIS.QASignal import (QA_Signal_eventManager,QA_Signal_events,
QA_Signal_Listener,QA_Signal_Sender,QA_signal_usual_model)
import pandas
from threading import *
class backtest(QA_Backtest):
def QA_backtest_init(self):
pass
def QA_backtest_start(self):
pass
def signal_handle(self):
pass
def message_center(self,name,listener_name):
class QASS(QA_Signal_Sender):
def QAS_send(self):
pass
class QASL(QA_Signal_Listener):
def QA_receive(self,event):
pass
eventManager = QA_Signal_eventManager()
for item in range(0,len(listener_name),1):
            listener = QASL(listener_name[item])  # subscribe
            eventManager.AddEventListener(name, listener.QA_receive)
            # bind the event to the listener's response callback
eventManager.Start()
publicAcc = QASS(eventManager)
timer = Timer(1, publicAcc.QAS_send)
timer.start()
### run
backtest=backtest()
backtest.QA_backtest_init()
backtest.QA_backtest_start() | mit |
plissonf/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
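# Illustrative check (not part of the original example): the distance-preserving
# behaviour of MDS can be inspected by correlating pairwise distances before and
# after the embedding, e.g.
#     from scipy.spatial.distance import pdist
#     print(np.corrcoef(pdist(X), pdist(Y))[0, 1])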
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
compops/gpo-abc2015 | scripts-paper/example1-gposmc.py | 2 | 5416 | ##############################################################################
##############################################################################
# Estimating the volatility of synthetic data
# using a stochastic volatility (SV) model with Gaussian log-returns.
#
# The SV model is inferred using the GPO-SMC algorithm.
#
# For more details, see https://github.com/compops/gpo-abc2015
#
# (c) 2016 Johan Dahlin
# liu (at) johandahlin.com
#
##############################################################################
##############################################################################
import sys
sys.path.insert(0, '/media/sf_home/src/gpo-abc2015')
# Setup files
output_file = 'results/example1/example1-gposmc'
# Load packages and helpers
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from state import smc
from para import gpo_gpy
from models import hwsv_4parameters
from misc.portfolio import ensure_dir
# Set the seed for re-producibility
np.random.seed(87655678)
##############################################################################
# Arrange the data structures
##############################################################################
sm = smc.smcSampler()
gpo = gpo_gpy.stGPO()
##############################################################################
# Setup the system
##############################################################################
sys = hwsv_4parameters.ssm()
sys.par = np.zeros((sys.nPar, 1))
sys.par[0] = 0.20
sys.par[1] = 0.96
sys.par[2] = 0.15
sys.par[3] = 0.00
sys.T = 500
sys.xo = 0.0
sys.version = "standard"
sys.transformY = "none"
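# For reference (a standard Gaussian SV parameterisation; the exact mapping of
# sys.par to (mu, phi, sigma_v, rho) is an assumption -- see hwsv_4parameters):
#     x_t = mu + phi * (x_{t-1} - mu) + sigma_v * v_t,   v_t ~ N(0, 1)
#     y_t = exp(x_t / 2) * e_t,                          e_t ~ N(0, 1)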
##############################################################################
# Generate data
##############################################################################
sys.generateData(
fileName='data/hwsv_4parameters_syntheticT500.csv', order="xy")
##############################################################################
# Setup the parameters
##############################################################################
th = hwsv_4parameters.ssm()
th.nParInference = 3
th.copyData(sys)
th.version = "standard"
th.transformY = "none"
##############################################################################
# Setup the GPO algorithm
##############################################################################
settings = {'gpo_initPar': np.array([0.00, 0.95, 0.50, 1.80]),
'gpo_upperBounds': np.array([1.00, 1.00, 1.00, 2.00]),
'gpo_lowerBounds': np.array([0.00, 0.00, 0.01, 1.20]),
'gpo_estHypParInterval': 25,
'gpo_preIter': 50,
'gpo_maxIter': 450,
'smc_weightdist': "gaussian",
'smc_tolLevel': 0.10,
'smc_nPart': 2000
}
gpo.initPar = settings['gpo_initPar'][0:th.nParInference]
gpo.upperBounds = settings['gpo_upperBounds'][0:th.nParInference]
gpo.lowerBounds = settings['gpo_lowerBounds'][0:th.nParInference]
gpo.maxIter = settings['gpo_maxIter']
gpo.preIter = settings['gpo_preIter']
gpo.EstimateHyperparametersInterval = settings['gpo_estHypParInterval']
gpo.verbose = True
gpo.jitteringCovariance = 0.01 * np.diag(np.ones(th.nParInference))
gpo.preSamplingMethod = "latinHyperCube"
gpo.EstimateThHatEveryIteration = False
gpo.EstimateHessianEveryIteration = False
##############################################################################
# Setup the SMC algorithm
##############################################################################
sm.filter = sm.bPF
sm.nPart = settings['smc_nPart']
sm.genInitialState = True
sm.xo = sys.xo
th.xo = sys.xo
##############################################################################
# GPO using the Particle filter
##############################################################################
# Run the GPO routine
gpo.bayes(sm, sys, th)
# Estimate inverse Hessian
gpo.estimateHessian()
#############################################################################
# Write results to file
##############################################################################
ensure_dir(output_file + '.csv')
# Model parameters
fileOut = pd.DataFrame(gpo.thhat)
fileOut.to_csv(output_file + '-model.csv')
# Inverse Hessian estimate
fileOut = pd.DataFrame(gpo.invHessianEstimate)
fileOut.to_csv(output_file + '-modelvar.csv')
##############################################################################
# GPO using the Particle filter (comparison with SPSA)
##############################################################################
# Set the seed for re-producibility
np.random.seed(87655678)
# Run the GPO routine
settings['gpo_maxIter'] = 700 - settings['gpo_preIter']
gpo.maxIter = settings['gpo_maxIter']
gpo.EstimateThHatEveryIteration = True
gpo.bayes(sm, sys, th)
# Write output
gpo.writeToFile(sm, fileOutName=output_file + '-run.csv')
##############################################################################
##############################################################################
# End of file
##############################################################################
##############################################################################
| mit |
biolab/red | examples.py | 1 | 3607 | """
Copyright (C) 2013 Marinka Zitnik <[email protected]>
This file is part of Red.
Red is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Red is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Red. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import numpy as np
from sklearn.metrics import roc_auc_score
from data import loader
from red import Red
def run_red(G, S, H, genes):
c = 100
alpha = 0.1
lambda_u = 1e-4
lambda_v = lambda_u
beta = alpha
gene_red = Red(G, S, H, genes)
gene_red.order(rank=c, lambda_u=lambda_u, lambda_v=lambda_v,
alpha=alpha, beta=beta,
verbose=False, callback=None)
return gene_red
def predict_alleviating(G, S, H, genes, alleviating_set):
g2i = {g: i for i, g in enumerate(genes)}
for cls, g1, g2 in alleviating_set:
G[g2i[g1], g2i[g2]] = G[g2i[g2], g2i[g1]] = np.nan
gene_red = run_red(G, S, H, genes)
pred = [gene_red.alleviating(u, v) for _, u, v in alleviating_set]
auc = roc_auc_score(zip(*alleviating_set)[0], pred)
print "Alleviating AUC: %5.4f" % auc
return gene_red, auc
def predict_kegg(G, S, H, genes, kegg):
gene_red = run_red(G, S, H, genes)
pred = [gene_red.epistatic_to(v, u) for _, u, v in kegg]
auc = roc_auc_score(zip(*kegg)[0], pred)
print "KEGG AUC: %5.4f" % auc
return gene_red, auc
def predict_glycans(G, S, H, genes, glycans):
gene_red = run_red(G, S, H, genes)
pred = [gene_red.epistatic_to(v, u) for _, u, v in glycans]
auc = roc_auc_score(zip(*glycans)[0], pred)
print "N-linked glycosylation AUC: %5.4f" % auc
return gene_red, auc
path = os.path.abspath("data/080930a_DM_data.mat")
G, S, H, genes = loader.load_jonikas_data(path)
np.random.seed(42)
# 1
path_ord_kegg = os.path.abspath("data/KEGG_ordered.txt")
path_unord_kegg = os.path.abspath("data/KEGG_nonordered.txt")
kegg = loader.load_battle_KEGG_data(path_ord_kegg, path_unord_kegg)
predict_kegg(G, S, H, genes, kegg)
# 2
path_neg_glycans = os.path.abspath("data/N-linked-glycans_negative.txt")
path_pos_glycans = os.path.abspath("data/N-linked-glycans_positive.txt")
glycans = loader.load_n_linked_glycans_data(path_pos_glycans, path_neg_glycans)
predict_glycans(G, S, H, genes, glycans)
# 3
alleviating_set = loader.get_alleviating_interactions(path)
predict_alleviating(G, S, H, genes, alleviating_set)
# 4
gene_red = run_red(G, S, H, genes)
glycan_genes = {'CWH41', 'DIE2', 'ALG8', 'ALG6', 'ALG5', 'ALG12',
'ALG9', 'ALG3', 'OST3', 'OST5'}
erad_genes = {'MNL1', 'YOS9', 'YLR104W', 'DER1', 'USA1', 'HRD3', 'HRD1',
'UBC7', 'CUE1'}
tailanch_genes = {'SGT2', 'MDY2', 'YOR164C',
'GET3', 'GET2', 'GET1'}
print '\n**N-linked glycosylation pathway'
gene_red.print_relationships(glycan_genes)
gene_red.construct_network(glycan_genes)
print '\n**ERAD pathway'
gene_red.print_relationships(erad_genes)
gene_red.construct_network(erad_genes)
print '\n**Tail-anchored protein insertion pathway'
gene_red.print_relationships(tailanch_genes)
gene_red.construct_network(tailanch_genes) | gpl-3.0 |
nvoron23/statsmodels | statsmodels/examples/ex_generic_mle.py | 32 | 16462 |
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
data = sm.datasets.spector.load()
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params)))
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=probitloglike)
res = mod.fit(method="nm", fargs=(data.endog,data.exog), maxiter=500)
print(res)
#np.allclose(res.params, probit_res.params)
print(res.params, probit_res.params)
#datal = sm.datasets.longley.load()
datal = sm.datasets.ccard.load()
datal.exog = sm.add_constant(datal.exog, prepend=False)
# Instance of GenericLikelihood model doesn't work directly, because loglike
# cannot get access to data in self.endog, self.exog
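# Two workarounds are exercised below: either pass endog/exog explicitly to
# ``fit`` via ``fargs`` (see loglike_norm_xb), or subclass GenericLikelihoodModel
# so that ``loglike`` can read self.endog/self.exog (see MygMLE further down).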
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False
show_error2 = 1  # False
if show_error:
def loglike_norm_xb(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(method="nm", maxiter = 500)
print(res_norm.params)
if show_error2:
def loglike_norm_xb(params, endog, exog):
beta = params[:-1]
sigma = params[-1]
#print exog.shape, beta.shape
xb = np.dot(exog, beta)
#print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
method="nm", maxiter = 5000,
fargs=(datal.endog, datal.exog))
print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
# just for testing
def loglike(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma).sum()
def loglikeobs(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
print(res_norm2.params)
res2 = sm.OLS(datal.endog, datal.exog).fit()
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
#print res_norm3.bse # not available
print('llf', res2.llf, res_norm3.llf)
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I don't think score was supposed to be called
'''
>>> mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None, maxiter
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 316, in fit
disp=disp, retall=retall, callback=callback)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
710, in fmin_bfgs
gfk = myfprime(x0)
File "C:\Josef\_progs\Subversion\scipy-trunk_after\trunk\dist\scipy-0.9.0.dev6
579.win32\Programs\Python25\Lib\site-packages\scipy\optimize\optimize.py", line
103, in function_wrapper
return function(x, *args)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 240, in <lambda>
score = lambda params: -self.score(params)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\model.py", line 480, in score
return approx_fprime1(params, self.nloglike)
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\s
tatsmodels\sandbox\regression\numdiff.py", line 81, in approx_fprime1
nobs = np.size(f0) #len(f0)
TypeError: object of type 'numpy.float64' has no len()
'''
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
maxiter = 500, retall=0)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
print(np.linalg.eigh(hh))
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)
print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)
print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "c:\josef\eclipsegworkspace\statsmodels-josef-experimental-gsoc\scikits\statsmodels\model.py", line 993, in conf_int
lower = self.params - dist.ppf(1-alpha/2,self.model.df_resid) *\
AttributeError: 'MygMLE' object has no attribute 'df_resid'
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
| bsd-3-clause |
spacelis/hrnn4sim | hrnn4sim/base.py | 1 | 6693 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: training.py
Author: Wen Li
Email: [email protected]
Github: http://github.com/spacelis
Description: Training utility functions
"""
# pylint: disable=invalid-name
from __future__ import print_function
from datetime import datetime
from os.path import join as pjoin
import pandas as pd
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from keras import backend as K
from keras.callbacks import TensorBoard, ModelCheckpoint
from .vectorization import get_fullbatch, get_minibatches
from .vectorization import dataset_tokenize
def read_data(fin, filename):
""" Resove file format for the input file and return a file object """
if filename.endswith('.csv'):
return pd.read_csv(fin)
elif filename.endswith('.feather'):
return pd.read_feather(filename)
raise ValueError(f'File format not supported: {filename}')
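# Note: for '.feather' inputs the already-open handle ``fin`` is ignored and the
# file is re-read from ``filename``; for '.csv' the open handle itself is parsed.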
class ModelBase(object):
""" A Base model for handling training, validation and prediction"""
def __init__(self, log_device=False):
super(ModelBase, self).__init__()
self.model = None
self.vectorizer = None
if log_device:
self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
else:
self.session = tf.Session()
def split_examples(self, examples, ratio=0.8): # pylint: disable=no-self-use
''' Split training and validating data set '''
total_cnt = len(examples)
train_cnt = int(total_cnt * ratio)
valid_cnt = total_cnt - train_cnt
train_set = examples[:train_cnt]
valid_set = examples[-valid_cnt:]
return train_set, valid_set
def make_vectorizer(self, examples, **kwargs):
''' Make a vectorizer for the model '''
raise NotImplementedError()
def build(self):
''' Build the model '''
raise NotImplementedError()
def save_model(self, job_dir, model_dir, model_label):
""" Save the trained model to the job_dir"""
self.model.save_weights('model.h5.tmp')
with file_io.FileIO('model.h5.tmp', mode='rb') as fin:
model_path = pjoin(job_dir, model_dir, 'model_{}.h5'.format(model_label))
with file_io.FileIO(model_path, mode='wb') as fout:
fout.write(fin.read())
print("Saved {}".format(model_path))
def load_model(self, job_dir, model_dir, model_label):
""" Loading model from files """
model_path = pjoin(job_dir, model_dir, 'model_{}.h5'.format(model_label))
with file_io.FileIO(model_path, mode='rb') as fin:
with file_io.FileIO('model.h5.tmp', mode='wb') as fout:
fout.write(fin.read())
self.model.load_weights('model.h5.tmp')
print("Load {}".format(model_path))
def train(self, trainfile, model_label=None, # pylint: disable=too-many-arguments
epochs=30, batch_size=100,
val_file=None, val_split=0.8,
shuffle=False, include_eos=False,
job_dir='.', model_dir='ckpt'):
# pylint: disable=too-many-locals
''' Train the model '''
with file_io.FileIO(trainfile, 'r') as fin:
examples = read_data(fin, trainfile)
if shuffle:
examples = examples.sample(frac=1).reset_index(drop=True)
else:
examples = examples.reset_index(drop=True)
        if val_file is not None:
            with file_io.FileIO(val_file, 'r') as fin:
                val_examples = read_data(fin, val_file)
if shuffle:
val_examples = val_examples.sample(frac=1).reset_index(drop=True)
else:
val_examples = val_examples.reset_index(drop=True)
self.vectorizer = self.make_vectorizer(pd.concat([examples, val_examples]),
include_eos=include_eos)
else:
self.vectorizer = self.make_vectorizer(examples, include_eos=include_eos)
self.build()
if model_label is not None:
self.load_model(job_dir, model_dir, model_label)
label = '{}_{}'.format(self.__class__.__name__, datetime.now().strftime("%Y%m%d_%H%M%S"))
# Write Summaries to Tensorboard log
tensorboardCB = TensorBoard(
log_dir=pjoin(job_dir, 'tfgraph', label),
#histogram_freq=100,
write_graph=True)
ckpt_label = '{}_epoch_{{epoch:02d}}_acc_{{val_acc:.4f}}'.format(label)
checkpointCB = ModelCheckpoint(ckpt_label, monitor='val_acc', save_weights_only=True)
# Train the model
if val_file is not None:
train_set, valid_set = self.split_examples(examples, val_split)
else:
train_set, valid_set = examples, val_examples
x, y = get_fullbatch(train_set, self.vectorizer, multiple=batch_size)
vx, vy = get_fullbatch(valid_set, self.vectorizer, multiple=batch_size)
# Training
K.set_session(self.session)
self.model.fit(x, y, batch_size=batch_size, epochs=epochs,
validation_data=(vx, vy),
callbacks=[tensorboardCB, checkpointCB])
# Validation
loss, acc = self.model.evaluate(vx, vy, batch_size=batch_size)
model_label = '{}_loss_{:.4f}_acc_{:.4f}'.format(label, loss, acc)
self.save_model(job_dir, model_dir, model_label)
print()
print('Loss =', loss)
print('Accuracy =', acc)
def test(self, testfile, model_label, batch_size=100, # pylint: disable=too-many-arguments
include_eos=False, job_dir='.', model_dir='ckpt'):
""" Evaluate model on the test data """
with file_io.FileIO(testfile, 'r') as fin:
examples = read_data(fin, testfile)
self.vectorizer = self.make_vectorizer(examples, include_eos=include_eos)
self.build()
K.set_session(self.session)
self.load_model(job_dir, model_dir, model_label)
x, y = get_fullbatch(examples, self.vectorizer, multiple=batch_size)
loss, acc = self.model.evaluate(x, y, batch_size=batch_size)
print()
print('Loss =', loss)
print('Accuracy =', acc)
def predict(self, items, batch_size=100):
        ''' Predict the matching of the items '''
x = get_fullbatch(dataset_tokenize(items), self.vectorizer, with_y=False)
K.set_session(self.session)
pred = self.model.predict(x, batch_size=batch_size)
return pd.DataFrame({
'seqa': items['seqa'],
'seqb': items['seqb'],
'matched': pred,
})
| mit |
Winand/pandas | pandas/tests/indexes/timedeltas/test_ops.py | 6 | 48590 | import pytest
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index)
from pandas._libs.tslib import iNaT
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setup_method(self, method):
super(TestTimedeltaIndexOps, self).setup_method(method)
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
f = lambda x: isinstance(x, TimedeltaIndex)
self.check_ops_properties(TimedeltaIndex._field_ops, f)
self.check_ops_properties(TimedeltaIndex._object_ops, f)
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
assert idx1.is_monotonic
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timedelta('1 days')
assert idx.max() == Timedelta('3 days')
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
assert pd.isna(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
assert np.min(td) == Timedelta('16815 days')
assert np.max(td) == Timedelta('16820 days')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, td, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, td, out=0)
assert np.argmin(td) == 0
assert np.argmax(td) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, td, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
td.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, td.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
assert result == expected
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assert_raises_regex(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
pytest.raises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# floor divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng // offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (maybe we could in the future)
pytest.raises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
pytest.raises(TypeError, lambda: tdi - dt)
pytest.raises(TypeError, lambda: tdi - dti)
pytest.raises(TypeError, lambda: td - dt)
pytest.raises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
pytest.raises(TypeError, lambda: dt_tz - ts)
pytest.raises(TypeError, lambda: dt_tz - dt)
pytest.raises(TypeError, lambda: dt_tz - ts_tz2)
pytest.raises(TypeError, lambda: dt - dt_tz)
pytest.raises(TypeError, lambda: ts - dt_tz)
pytest.raises(TypeError, lambda: ts_tz2 - ts)
pytest.raises(TypeError, lambda: ts_tz2 - dt)
pytest.raises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
pytest.raises(TypeError, lambda: dti - ts_tz)
pytest.raises(TypeError, lambda: dti_tz - ts)
pytest.raises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
pytest.raises(ValueError, lambda: tdi + dti[0:1])
pytest.raises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
pytest.raises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values: the nth element is repeated n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
assert idx[0] in idx
def test_unknown_attribute(self):
# see gh-9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
assert 'foo' not in ts.__dict__.keys()
pytest.raises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
assert result == pd.Timedelta('1 day')
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Timedelta('1 day')
result = idx.take([-1])
assert result == pd.Timedelta('31 day')
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_nat(self):
assert pd.TimedeltaIndex._na_value is pd.NaT
assert pd.TimedeltaIndex([])._na_value is pd.NaT
idx = pd.TimedeltaIndex(['1 days', '2 days'])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.asobject.equals(idx2.asobject)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
assert -td == Timedelta(-10, unit='d')
assert +td == Timedelta(10, unit='d')
assert td - td == Timedelta(0, unit='ns')
assert (td - pd.NaT) is pd.NaT
assert td + td == Timedelta(20, unit='d')
assert (td + pd.NaT) is pd.NaT
assert td * 2 == Timedelta(20, unit='d')
assert (td * pd.NaT) is pd.NaT
assert td / 2 == Timedelta(5, unit='d')
assert td // 2 == Timedelta(5, unit='d')
assert abs(td) == td
assert abs(-td) == td
assert td / td == 1
assert (td / pd.NaT) is np.nan
assert (td // pd.NaT) is np.nan
# invert
assert -td == Timedelta('-10d')
assert td * -1 == Timedelta('-10d')
assert -1 * td == Timedelta('-10d')
assert abs(-td) == Timedelta('10d')
# invalid multiply with another timedelta
pytest.raises(TypeError, lambda: td * td)
# can't operate with integers
pytest.raises(TypeError, lambda: td + 2)
pytest.raises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
assert 240 == td / pd.offsets.Hour(1)
assert 1 / 240.0 == pd.offsets.Hour(1) / td
assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other + td, expected)
pytest.raises(TypeError, lambda: td + np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
tm.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(-other + td, expected)
pytest.raises(TypeError, lambda: td - np.array([1]))
pytest.raises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
pytest.raises(TypeError, lambda: td * other)
pytest.raises(TypeError, lambda: other * td)
tm.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
tm.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
tm.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
tm.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
tm.assert_series_equal(expected, td * other)
tm.assert_series_equal(expected, other * td)
def test_ops_series_object(self):
# GH 13043
s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
name='xxx')
assert s.dtype == object
exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
# object series & object series
s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
name='xxx')
assert s2.dtype == object
exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
name='xxx')
tm.assert_series_equal(s2 - s, exp)
tm.assert_series_equal(s - s2, -exp)
s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
name='xxx', dtype=object)
assert s.dtype == object
exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
name='xxx')
tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_ops_error_str(self):
# GH 13624
tdi = TimedeltaIndex(['1 day', '2 days'])
for l, r in [(tdi, 'a'), ('a', tdi)]:
with pytest.raises(TypeError):
l + r
with pytest.raises(TypeError):
l > r
with pytest.raises(TypeError):
l == r
with pytest.raises(TypeError):
l != r
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
assert result == expected
result = td.to_frame().mean()
assert result[0] == expected
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
assert result == expected
result = td.median()
expected = to_timedelta('00:00:09')
assert result == expected
result = td.to_frame().median()
assert result[0] == expected
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
assert result == expected
result = td.to_frame().sum()
assert result[0] == expected
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
assert result == expected
result = td.to_frame().std()
assert result[0] == expected
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
pytest.raises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
assert s.diff().median() == timedelta(days=4)
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
assert s.diff().median() == timedelta(days=6)
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
NA = np.nan
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
assert_series_equal(actual, s2)
actual = s2 - s1
assert_series_equal(actual, s1)
actual = s1 + scalar1
assert_series_equal(actual, s2)
actual = scalar1 + s1
assert_series_equal(actual, s2)
actual = s2 - scalar1
assert_series_equal(actual, s1)
actual = -scalar1 + s2
assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
assert_series_equal(actual, sn)
actual = s1 + NA
assert_series_equal(actual, sn)
actual = NA + s1
assert_series_equal(actual, sn)
actual = s1 - NA
assert_series_equal(actual, sn)
actual = -NA + s1
assert_series_equal(actual, sn)
actual = s1 + pd.NaT
assert_series_equal(actual, sn)
actual = s2 - pd.NaT
assert_series_equal(actual, sn)
actual = s1 + df1
assert_frame_equal(actual, df2)
actual = s2 - df1
assert_frame_equal(actual, df1)
actual = df1 + s1
assert_frame_equal(actual, df2)
actual = df2 - s1
assert_frame_equal(actual, df1)
actual = df1 + df1
assert_frame_equal(actual, df2)
actual = df2 - df1
assert_frame_equal(actual, df1)
actual = df1 + scalar1
assert_frame_equal(actual, df2)
actual = df2 - scalar1
assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
assert_frame_equal(actual, dfn)
actual = df1 + NA
assert_frame_equal(actual, dfn)
actual = df1 - NA
assert_frame_equal(actual, dfn)
actual = df1 + pd.NaT # NaT is datetime, not timedelta
assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
assert_frame_equal(actual, dfn)
def test_compare_timedelta_series(self):
# regression test for GH5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
class TestSlicing(object):
def test_tdi_ops_attributes(self):
rng = timedelta_range('2 days', periods=5, freq='2D', name='x')
result = rng + 1
exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng - 2
exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
result = rng * 2
exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4D'
result = rng / 2
exp = timedelta_range('1 days', periods=5, freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
result = -rng
exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2D'
rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
result = abs(rng)
exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
'2 days'], name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_add_overflow(self):
# see gh-14068
msg = "too (big|large) to convert"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(106580, 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([106580], 'D') + Timestamp('2000')
with tm.assert_raises_regex(OverflowError, msg):
Timestamp('2000') + to_timedelta([106580], 'D')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta([_NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with tm.assert_raises_regex(OverflowError, msg):
(to_timedelta([_NaT, '5 days', '1 hours']) -
to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (to_timedelta([pd.NaT, '5 days', '1 hours']) +
to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
| bsd-3-clause |
Luttik/mellowcakes_prototype | setup.py | 1 | 3722 | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'pip_readme.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='freya',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.14',
description='freya',
long_description=long_description,
# The project's main homepage.
url='https://github.com/Luttik/mellowcakes_prototype/branches',
# Author details
author='D.T. Luttik',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='machinelearning email analysis freya',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'pandas', 'scipy', 'scikit-learn'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': [],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
],
},
)
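# Illustrative install commands (not part of the original file); the extras
# names match the 'dev' and 'test' groups declared in extras_require above:
#
#   pip install .               # regular install
#   pip install -e .[dev,test]  # editable install with the optional extras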
| mit |
ARM-software/trappy | trappy/cpu_power.py | 1 | 6417 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the cpu_cooling devices in the current
directory's trace.dat"""
from __future__ import division
from __future__ import unicode_literals
import pandas as pd
from trappy.base import Base
from trappy.dynamic import register_ftrace_parser
def pivot_with_labels(dfr, data_col_name, new_col_name, mapping_label):
"""Pivot a :mod:`pandas.DataFrame` row into columns
:param dfr: The :mod:`pandas.DataFrame` to operate on.
:param data_col_name: The name of the column in the :mod:`pandas.DataFrame`
which contains the values.
:param new_col_name: The name of the column in the :mod:`pandas.DataFrame` that will
become the new columns.
:param mapping_label: A dictionary whose keys are the values in
new_col_name and whose values are their
corresponding name in the :mod:`pandas.DataFrame` to be returned.
:type dfr: :mod:`pandas.DataFrame`
:type data_col_name: str
:type new_col_name: str
:type mapping_label: dict
Example:
>>> dfr_in = pd.DataFrame({'cpus': ["000000f0",
>>> "0000000f",
>>> "000000f0",
>>> "0000000f"
>>> ],
>>> 'freq': [1, 3, 2, 6]})
>>> dfr_in
cpus freq
0 000000f0 1
1 0000000f 3
2 000000f0 2
3 0000000f 6
>>> map_label = {"000000f0": "A15", "0000000f": "A7"}
>>> power.pivot_with_labels(dfr_in, "freq", "cpus", map_label)
A15 A7
0 1 NaN
1 1 3
2 2 3
3 2 6
"""
# There has to be a more "pandas" way of doing this.
col_set = set(dfr[new_col_name])
ret_series = {}
for col in col_set:
try:
label = mapping_label[col]
except KeyError:
available_keys = ", ".join(mapping_label.keys())
error_str = '"{}" not found, available keys: {}'.format(col,
available_keys)
raise KeyError(error_str)
data = dfr[dfr[new_col_name] == col][data_col_name]
ret_series[label] = data
return pd.DataFrame(ret_series).fillna(method="pad")
def num_cpus_in_mask(mask):
"""Return the number of cpus in a cpumask"""
mask = mask.replace(",", "")
value = int(mask, 16)
return bin(value).count("1")
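# Illustrative check (not part of the original module): a mask with the upper
# four bits set, such as the "000000f0" A15 mask used in the docstring above,
# contains four CPUs.
#
#   >>> num_cpus_in_mask("000000f0")
#   4
#   >>> num_cpus_in_mask("0000000f")
#   4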
class CpuOutPower(Base):
"""Process the cpufreq cooling power actor data in a ftrace dump"""
unique_word = "thermal_power_cpu_limit"
"""The unique word that will be matched in a trace line"""
name = "cpu_out_power"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "cpus"
"""The Pivot along which the data is orthogonal"""
def get_all_freqs(self, mapping_label):
"""Get a :mod:`pandas.DataFrame` with the maximum frequencies allowed by the governor
:param mapping_label: A dictionary that maps cpumasks to name
of the cpu.
:type mapping_label: dict
:return: freqs are in MHz
"""
dfr = self.data_frame
return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000
register_ftrace_parser(CpuOutPower, "thermal")
class CpuInPower(Base):
"""Process the cpufreq cooling power actor data in a ftrace dump
"""
unique_word = "thermal_power_cpu_get_power"
"""The unique word that will be matched in a trace line"""
name = "cpu_in_power"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "cpus"
"""The Pivot along which the data is orthogonal"""
def _get_load_series(self):
"""get a :mod:`pandas.Series` with the aggregated load"""
dfr = self.data_frame
load_cols = [s for s in dfr.columns if s.startswith("load")]
load_series = dfr[load_cols[0]].copy()
for col in load_cols[1:]:
load_series += dfr[col]
return load_series
def get_load_data(self, mapping_label):
"""Return :mod:`pandas.DataFrame` suitable for plot_load()
:param mapping_label: A Dictionary mapping cluster cpumasks to labels
:type mapping_label: dict
"""
dfr = self.data_frame
load_series = self._get_load_series()
load_dfr = pd.DataFrame({"cpus": dfr["cpus"], "load": load_series})
return pivot_with_labels(load_dfr, "load", "cpus", mapping_label)
def get_normalized_load_data(self, mapping_label):
"""Return a :mod:`pandas.DataFrame` for plotting normalized load data
:param mapping_label: should be a dictionary mapping cluster cpumasks
to labels
:type mapping_label: dict
"""
dfr = self.data_frame
load_series = self._get_load_series()
load_series *= dfr['freq']
for cpumask in mapping_label:
num_cpus = num_cpus_in_mask(cpumask)
idx = dfr["cpus"] == cpumask
max_freq = max(dfr[idx]["freq"])
load_series[idx] = load_series[idx] / (max_freq * num_cpus)
load_dfr = pd.DataFrame({"cpus": dfr["cpus"], "load": load_series})
return pivot_with_labels(load_dfr, "load", "cpus", mapping_label)
def get_all_freqs(self, mapping_label):
"""get a :mod:`pandas.DataFrame` with the "in" frequencies as seen by the governor
.. note::
Frequencies are in MHz
"""
dfr = self.data_frame
return pivot_with_labels(dfr, "freq", "cpus", mapping_label) / 1000
register_ftrace_parser(CpuInPower, "thermal")
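# Hedged usage sketch (not part of the original module). The registered
# parsers are normally reached through a trappy.FTrace object, whose attribute
# names follow the `name` class attributes above ("cpu_in_power",
# "cpu_out_power"). The trace path and the cpumask-to-label mapping below are
# illustrative assumptions, not values defined in this module:
#
#   import trappy
#
#   trace = trappy.FTrace("./trace.dat")             # assumed trace location
#   mapping = {"000000f0": "A15", "0000000f": "A7"}
#
#   in_freqs = trace.cpu_in_power.get_all_freqs(mapping)    # MHz per cluster
#   out_freqs = trace.cpu_out_power.get_all_freqs(mapping)  # governor limits
#   load = trace.cpu_in_power.get_load_data(mapping)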
| apache-2.0 |
rosswhitfield/mantid | Framework/PythonInterface/mantid/plots/__init__.py | 3 | 1385 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid package
#
#
"""
Functionality for unpacking mantid objects for plotting with matplotlib.
"""
# This file should be left free of PyQt imports to allow quick importing
# of the main package.
from collections.abc import Iterable # noqa: F401
from matplotlib.projections import register_projection
from matplotlib.scale import register_scale
from mantid.plots import datafunctions, axesfunctions, axesfunctions3D # noqa: F401
from mantid.plots.legend import convert_color_to_hex, LegendProperties # noqa: F401
from mantid.plots.datafunctions import get_normalize_by_bin_width # noqa: F401
from mantid.plots.scales import PowerScale, SquareScale # noqa: F401
from mantid.plots.mantidaxes import MantidAxes, MantidAxes3D, WATERFALL_XOFFSET_DEFAULT, WATERFALL_YOFFSET_DEFAULT # noqa: F401
from mantid.plots.utility import (artists_hidden, autoscale_on_update, legend_set_draggable, MantidAxType) # noqa: F401
register_projection(MantidAxes)
register_projection(MantidAxes3D)
register_scale(PowerScale)
register_scale(SquareScale)
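# Illustrative note (not part of the original module): once registered above,
# the custom projection and scales can be requested through the standard
# matplotlib APIs via their registered names. A sketch, assuming a running
# matplotlib backend:
#
#   import matplotlib.pyplot as plt
#
#   fig, ax = plt.subplots(subplot_kw={'projection': MantidAxes.name})
#   ax.set_yscale(SquareScale.name)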
| gpl-3.0 |
jrmontag/Data-Science-45min-Intros | ml-basis-expansion-101/kernel.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
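# Minimal numeric sketch (added for illustration; the points, grid and
# bandwidth below are arbitrary assumptions): a kernel density estimate is
# simply the average of kernel functions centred on the observed samples,
# which is the idea the panels below visualise.
_pts = np.array([0.0, 1.0, 5.0])                   # observed samples
_grid = np.linspace(-2.0, 8.0, 5)                  # evaluation points
_h = 0.75                                          # bandwidth
_dens = np.mean(
    np.exp(-0.5 * ((_grid[:, None] - _pts[None, :]) / _h) ** 2)
    / (_h * np.sqrt(2 * np.pi)),
    axis=1)
# _dens should agree with np.exp(KernelDensity(kernel='gaussian',
# bandwidth=_h).fit(_pts[:, None]).score_samples(_grid[:, None])).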
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', density=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', density=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| unlicense |
Akshay0724/scikit-learn | sklearn/ensemble/voting_classifier.py | 19 | 9888 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..utils.validation import has_fit_parameter, check_is_fitted
def _parallel_fit_estimator(estimator, X, y, sample_weight):
"""Private function used to fit an estimator within a job."""
if sample_weight is not None:
estimator.fit(X, y, sample_weight)
else:
estimator.fit(X, y)
return estimator
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for ``fit``.
If -1, then the number of jobs is set to the number of cores.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array-like, shape = [n_predictions]
The classes labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None, n_jobs=1):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if (self.weights is not None and
len(self.weights) != len(self.estimators)):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
if sample_weight is not None:
for name, step in self.estimators:
if not has_fit_parameter(step, 'sample_weight'):
raise ValueError('Underlying estimator \'%s\' does not support'
' sample weights.' % name)
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
transformed_y = self.le_.transform(y)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y,
sample_weight)
for _, clf in self.estimators)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
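# Hedged illustration (not part of the scikit-learn source): for a single
# sample, the 'hard' voting rule in ``predict`` above reduces to a weighted
# bincount over the label-encoded predictions of the individual estimators.
# The votes and weights below are made-up values used only to show the
# mechanics.
if __name__ == "__main__":
    votes = np.array([0, 1, 1])   # encoded predictions from 3 classifiers
    weights = [2, 1, 1]           # as in the ``weights=[2,1,1]`` docstring example
    # class 0 collects weight 2, class 1 collects 1 + 1 = 2; np.argmax breaks
    # the tie in favour of the lower class index
    print(np.argmax(np.bincount(votes, weights=weights)))  # -> 0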
| bsd-3-clause |
bikong2/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/linear_model/plot_theilsen.py | 76 | 3848 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of a simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The model is estimated by calculating the slopes and intercepts of a
subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
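# One could also cap the subpopulation size via the ``max_subpopulation``
# keyword described in the docstring above; an untested sketch (the value
# 1000 is arbitrary):
#     ('Theil-Sen (capped)',
#      TheilSenRegressor(max_subpopulation=1000, random_state=42))
# Capping trades some of the estimator's mathematical guarantees for runtime,
# since the spatial median is then taken over a random subset of the possible
# p-point subsamples.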
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen'}
lw = 2
# #############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt y")
# #############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.scatter(x, y, color='indigo', marker='x', s=40)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred, color=colors[name], linewidth=lw,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.title("Corrupt x")
plt.show()
| gpl-3.0 |
linebp/pandas | pandas/core/sparse/list.py | 9 | 4058 | import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_scalar
from pandas.core.sparse.array import SparseArray
from pandas.util._validators import validate_bool_kwarg
import pandas._libs.sparse as splib
class SparseList(PandasObject):
"""
Data structure for accumulating data to be converted into a
    SparseArray. Has a similar API to the standard Python list.
Parameters
----------
data : scalar or array-like
fill_value : scalar, default NaN
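    Examples
    --------
    A minimal sketch of the accumulate-then-convert workflow (the class warns
    on construction that it is deprecated)::
        splist = SparseList(fill_value=0.0)
        splist.append([1.0, 0.0, 2.0])
        splist.append(3.0)
        arr = splist.to_array()  # consolidated SparseArray of length 4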
"""
def __init__(self, data=None, fill_value=np.nan):
# see gh-13784
warnings.warn("SparseList is deprecated and will be removed "
"in a future version", FutureWarning, stacklevel=2)
self.fill_value = fill_value
self._chunks = []
if data is not None:
self.append(data)
def __unicode__(self):
contents = '\n'.join(repr(c) for c in self._chunks)
return '%s\n%s' % (object.__repr__(self), pprint_thing(contents))
def __len__(self):
return sum(len(c) for c in self._chunks)
def __getitem__(self, i):
if i < 0:
if i + len(self) < 0: # pragma: no cover
raise ValueError('%d out of range' % i)
i += len(self)
passed = 0
j = 0
while i >= passed + len(self._chunks[j]):
passed += len(self._chunks[j])
j += 1
return self._chunks[j][i - passed]
def __setitem__(self, i, value):
raise NotImplementedError
@property
def nchunks(self):
return len(self._chunks)
@property
def is_consolidated(self):
return self.nchunks == 1
def consolidate(self, inplace=True):
"""
Internally consolidate chunks of data
Parameters
----------
inplace : boolean, default True
Modify the calling object instead of constructing a new one
Returns
-------
splist : SparseList
If inplace=False, new object, otherwise reference to existing
object
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not inplace:
result = self.copy()
else:
result = self
if result.is_consolidated:
return result
result._consolidate_inplace()
return result
def _consolidate_inplace(self):
new_values = np.concatenate([c.sp_values for c in self._chunks])
new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
new_arr = SparseArray(new_values, sparse_index=new_index,
fill_value=self.fill_value)
self._chunks = [new_arr]
def copy(self):
"""
Return copy of the list
Returns
-------
new_list : SparseList
"""
new_splist = SparseList(fill_value=self.fill_value)
new_splist._chunks = list(self._chunks)
return new_splist
def to_array(self):
"""
Return SparseArray from data stored in the SparseList
Returns
-------
sparr : SparseArray
"""
self.consolidate(inplace=True)
return self._chunks[0]
def append(self, value):
"""
Append element or array-like chunk of data to the SparseList
Parameters
----------
value: scalar or array-like
"""
if is_scalar(value):
value = [value]
sparr = SparseArray(value, fill_value=self.fill_value)
self._chunks.append(sparr)
self._consolidated = False
def _concat_sparse_indexes(indexes):
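    # Each IntIndex holds positions relative to its own chunk, so offset the
    # indices by the total length of the preceding chunks before building one
    # IntIndex over the concatenated values.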
all_indices = []
total_length = 0
for index in indexes:
# increment by offset
inds = index.to_int_index().indices + total_length
all_indices.append(inds)
total_length += index.length
return splib.IntIndex(total_length, np.concatenate(all_indices))
| bsd-3-clause |
DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/lmfit-py/lmfit/models.py | 7 | 16554 | import numpy as np
from .model import Model
from .lineshapes import (gaussian, lorentzian, voigt, pvoigt, pearson7,
step, rectangle, breit_wigner, logistic,
students_t, lognormal, damped_oscillator,
expgaussian, skewed_gaussian, donaich,
skewed_voigt, exponential, powerlaw, linear,
parabolic)
from . import lineshapes
from .asteval import Interpreter
from .astutils import get_ast_names
class DimensionalError(Exception):
pass
def _validate_1d(independent_vars):
if len(independent_vars) != 1:
raise DimensionalError(
"This model requires exactly one independent variable.")
def index_of(arr, val):
"""return index of array nearest to a value
"""
if val < min(arr):
return 0
return np.abs(arr-val).argmin()
def fwhm_expr(model):
"return constraint expression for fwhm"
return "%.7f*%ssigma" % (model.fwhm_factor, model.prefix)
def guess_from_peak(model, y, x, negative, ampscale=1.0, sigscale=1.0):
"estimate amp, cen, sigma for a peak, create params"
if x is None:
return 1.0, 0.0, 1.0
maxy, miny = max(y), min(y)
maxx, minx = max(x), min(x)
imaxy = index_of(y, maxy)
cen = x[imaxy]
amp = (maxy - miny)*2.0
sig = (maxx-minx)/6.0
halfmax_vals = np.where(y > (maxy+miny)/2.0)[0]
if negative:
imaxy = index_of(y, miny)
amp = -(maxy - miny)*2.0
halfmax_vals = np.where(y < (maxy+miny)/2.0)[0]
if len(halfmax_vals) > 2:
sig = (x[halfmax_vals[-1]] - x[halfmax_vals[0]])/2.0
cen = x[halfmax_vals].mean()
amp = amp*sig*ampscale
sig = sig*sigscale
pars = model.make_params(amplitude=amp, center=cen, sigma=sig)
pars['%ssigma' % model.prefix].set(min=0.0)
return pars
def update_param_vals(pars, prefix, **kwargs):
"""convenience function to update parameter values
with keyword arguments"""
for key, val in kwargs.items():
pname = "%s%s" % (prefix, key)
if pname in pars:
pars[pname].value = val
return pars
COMMON_DOC = """
Parameters
----------
independent_vars: list of strings to be set as variable names
missing: None, 'drop', or 'raise'
None: Do not check for null or missing values.
'drop': Drop null or missing observations in data.
Use pandas.isnull if pandas is available; otherwise,
silently fall back to numpy.isnan.
'raise': Raise a (more helpful) exception when data contains null
or missing values.
prefix: string to prepend to parameter names, needed to add two Models that
have parameter names in common. None by default.
"""
class ConstantModel(Model):
__doc__ = "x -> c" + COMMON_DOC
def __init__(self, *args, **kwargs):
def constant(x, c):
return c
super(ConstantModel, self).__init__(constant, *args, **kwargs)
def guess(self, data, **kwargs):
pars = self.make_params()
pars['%sc' % self.prefix].set(value=data.mean())
return update_param_vals(pars, self.prefix, **kwargs)
class LinearModel(Model):
__doc__ = linear.__doc__ + COMMON_DOC if linear.__doc__ else ""
def __init__(self, *args, **kwargs):
super(LinearModel, self).__init__(linear, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
sval, oval = 0., 0.
if x is not None:
sval, oval = np.polyfit(x, data, 1)
pars = self.make_params(intercept=oval, slope=sval)
return update_param_vals(pars, self.prefix, **kwargs)
class QuadraticModel(Model):
__doc__ = parabolic.__doc__ + COMMON_DOC if parabolic.__doc__ else ""
def __init__(self, *args, **kwargs):
super(QuadraticModel, self).__init__(parabolic, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
a, b, c = 0., 0., 0.
if x is not None:
a, b, c = np.polyfit(x, data, 2)
pars = self.make_params(a=a, b=b, c=c)
return update_param_vals(pars, self.prefix, **kwargs)
ParabolicModel = QuadraticModel
class PolynomialModel(Model):
__doc__ = "x -> c0 + c1 * x + c2 * x**2 + ... c7 * x**7" + COMMON_DOC
MAX_DEGREE=7
DEGREE_ERR = "degree must be an integer less than %d."
def __init__(self, degree, *args, **kwargs):
if not isinstance(degree, int) or degree > self.MAX_DEGREE:
raise TypeError(self.DEGREE_ERR % self.MAX_DEGREE)
self.poly_degree = degree
pnames = ['c%i' % (i) for i in range(degree + 1)]
kwargs['param_names'] = pnames
def polynomial(x, c0=0, c1=0, c2=0, c3=0, c4=0, c5=0, c6=0, c7=0):
return np.polyval([c7, c6, c5, c4, c3, c2, c1, c0], x)
super(PolynomialModel, self).__init__(polynomial, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
pars = self.make_params()
if x is not None:
out = np.polyfit(x, data, self.poly_degree)
for i, coef in enumerate(out[::-1]):
pars['%sc%i'% (self.prefix, i)].set(value=coef)
return update_param_vals(pars, self.prefix, **kwargs)
class GaussianModel(Model):
__doc__ = gaussian.__doc__ + COMMON_DOC if gaussian.__doc__ else ""
fwhm_factor = 2.354820
def __init__(self, *args, **kwargs):
super(GaussianModel, self).__init__(gaussian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class LorentzianModel(Model):
__doc__ = lorentzian.__doc__ + COMMON_DOC if lorentzian.__doc__ else ""
fwhm_factor = 2.0
def __init__(self, *args, **kwargs):
super(LorentzianModel, self).__init__(lorentzian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
return update_param_vals(pars, self.prefix, **kwargs)
class VoigtModel(Model):
__doc__ = voigt.__doc__ + COMMON_DOC if voigt.__doc__ else ""
fwhm_factor = 3.60131
def __init__(self, *args, **kwargs):
super(VoigtModel, self).__init__(voigt, *args, **kwargs)
self.set_param_hint('sigma', min=0)
self.set_param_hint('gamma', expr='%ssigma' % self.prefix)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative,
ampscale=1.5, sigscale=0.65)
return update_param_vals(pars, self.prefix, **kwargs)
class PseudoVoigtModel(Model):
__doc__ = pvoigt.__doc__ + COMMON_DOC if pvoigt.__doc__ else ""
fwhm_factor = 2.0
def __init__(self, *args, **kwargs):
super(PseudoVoigtModel, self).__init__(pvoigt, *args, **kwargs)
self.set_param_hint('fraction', value=0.5)
self.set_param_hint('fwhm', expr=fwhm_expr(self))
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=1.25)
pars['%sfraction' % self.prefix].set(value=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
class Pearson7Model(Model):
__doc__ = pearson7.__doc__ + COMMON_DOC if pearson7.__doc__ else ""
def __init__(self, *args, **kwargs):
super(Pearson7Model, self).__init__(pearson7, *args, **kwargs)
self.set_param_hint('expon', value=1.5)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sexpon' % self.prefix].set(value=1.5)
return update_param_vals(pars, self.prefix, **kwargs)
class StudentsTModel(Model):
__doc__ = students_t.__doc__ + COMMON_DOC if students_t.__doc__ else ""
def __init__(self, *args, **kwargs):
super(StudentsTModel, self).__init__(students_t, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class BreitWignerModel(Model):
__doc__ = breit_wigner.__doc__ + COMMON_DOC if breit_wigner.__doc__ else ""
def __init__(self, *args, **kwargs):
super(BreitWignerModel, self).__init__(breit_wigner, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
pars['%sq' % self.prefix].set(value=1.0)
return update_param_vals(pars, self.prefix, **kwargs)
class LognormalModel(Model):
__doc__ = lognormal.__doc__ + COMMON_DOC if lognormal.__doc__ else ""
def __init__(self, *args, **kwargs):
super(LognormalModel, self).__init__(lognormal, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = self.make_params(amplitude=1.0, center=0.0, sigma=0.25)
pars['%ssigma' % self.prefix].set(min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class DampedOscillatorModel(Model):
__doc__ = damped_oscillator.__doc__ + COMMON_DOC if damped_oscillator.__doc__ else ""
def __init__(self, *args, **kwargs):
super(DampedOscillatorModel, self).__init__(damped_oscillator, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
        pars = guess_from_peak(self, data, x, negative,
                               ampscale=0.1, sigscale=0.1)
return update_param_vals(pars, self.prefix, **kwargs)
class ExponentialGaussianModel(Model):
__doc__ = expgaussian.__doc__ + COMMON_DOC if expgaussian.__doc__ else ""
def __init__(self, *args, **kwargs):
super(ExponentialGaussianModel, self).__init__(expgaussian, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class SkewedGaussianModel(Model):
__doc__ = skewed_gaussian.__doc__ + COMMON_DOC if skewed_gaussian.__doc__ else ""
fwhm_factor = 2.354820
def __init__(self, *args, **kwargs):
super(SkewedGaussianModel, self).__init__(skewed_gaussian, *args, **kwargs)
self.set_param_hint('sigma', min=0)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative)
return update_param_vals(pars, self.prefix, **kwargs)
class DonaichModel(Model):
__doc__ = donaich.__doc__ + COMMON_DOC if donaich.__doc__ else ""
def __init__(self, *args, **kwargs):
super(DonaichModel, self).__init__(donaich, *args, **kwargs)
def guess(self, data, x=None, negative=False, **kwargs):
pars = guess_from_peak(self, data, x, negative, ampscale=0.5)
return update_param_vals(pars, self.prefix, **kwargs)
class PowerLawModel(Model):
__doc__ = powerlaw.__doc__ + COMMON_DOC if powerlaw.__doc__ else ""
def __init__(self, *args, **kwargs):
super(PowerLawModel, self).__init__(powerlaw, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
try:
expon, amp = np.polyfit(np.log(x+1.e-14), np.log(data+1.e-14), 1)
except:
expon, amp = 1, np.log(abs(max(data)+1.e-9))
pars = self.make_params(amplitude=np.exp(amp), exponent=expon)
return update_param_vals(pars, self.prefix, **kwargs)
class ExponentialModel(Model):
__doc__ = exponential.__doc__ + COMMON_DOC if exponential.__doc__ else ""
def __init__(self, *args, **kwargs):
super(ExponentialModel, self).__init__(exponential, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
try:
sval, oval = np.polyfit(x, np.log(abs(data)+1.e-15), 1)
except:
sval, oval = 1., np.log(abs(max(data)+1.e-9))
pars = self.make_params(amplitude=np.exp(oval), decay=-1.0/sval)
return update_param_vals(pars, self.prefix, **kwargs)
class StepModel(Model):
__doc__ = step.__doc__ + COMMON_DOC if step.__doc__ else ""
def __init__(self, *args, **kwargs):
super(StepModel, self).__init__(step, *args, **kwargs)
def guess(self, data, x=None, **kwargs):
if x is None:
return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center=(xmax+xmin)/2.0)
pars['%ssigma' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class RectangleModel(Model):
__doc__ = rectangle.__doc__ + COMMON_DOC if rectangle.__doc__ else ""
def __init__(self, *args, **kwargs):
super(RectangleModel, self).__init__(rectangle, *args, **kwargs)
self.set_param_hint('midpoint',
expr='(%scenter1+%scenter2)/2.0' % (self.prefix,
self.prefix))
def guess(self, data, x=None, **kwargs):
if x is None:
return
ymin, ymax = min(data), max(data)
xmin, xmax = min(x), max(x)
pars = self.make_params(amplitude=(ymax-ymin),
center1=(xmax+xmin)/4.0,
center2=3*(xmax+xmin)/4.0)
pars['%ssigma1' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
pars['%ssigma2' % self.prefix].set(value=(xmax-xmin)/7.0, min=0.0)
return update_param_vals(pars, self.prefix, **kwargs)
class ExpressionModel(Model):
"""Model from User-supplied expression
%s
""" % COMMON_DOC
idvar_missing = "No independent variable found in\n %s"
idvar_notfound = "Cannot find independent variables '%s' in\n %s"
def __init__(self, expr, independent_vars=None, init_script=None,
*args, **kwargs):
# create ast evaluator, load custom functions
self.asteval = Interpreter()
for name in lineshapes.functions:
self.asteval.symtable[name] = getattr(lineshapes, name, None)
if init_script is not None:
self.asteval.eval(init_script)
# save expr as text, parse to ast, save for later use
self.expr = expr
self.astcode = self.asteval.parse(expr)
# find all symbol names found in expression
sym_names = get_ast_names(self.astcode)
if independent_vars is None and 'x' in sym_names:
independent_vars = ['x']
if independent_vars is None:
raise ValueError(self.idvar_missing % (self.expr))
# determine which named symbols are parameter names,
# try to find all independent variables
idvar_found = [False]*len(independent_vars)
param_names = []
for name in sym_names:
if name in independent_vars:
idvar_found[independent_vars.index(name)] = True
elif name not in self.asteval.symtable:
param_names.append(name)
        # make sure all of the independent variables were found
if not all(idvar_found):
lost = []
for ix, found in enumerate(idvar_found):
if not found:
lost.append(independent_vars[ix])
lost = ', '.join(lost)
raise ValueError(self.idvar_notfound % (lost, self.expr))
kwargs['independent_vars'] = independent_vars
def _eval(**kwargs):
for name, val in kwargs.items():
self.asteval.symtable[name] = val
return self.asteval.run(self.astcode)
super(ExpressionModel, self).__init__(_eval, *args, **kwargs)
# set param names here, and other things normally
# set in _parse_params(), which will be short-circuited.
self.independent_vars = independent_vars
self._func_allargs = independent_vars + param_names
self._param_names = set(param_names)
self._func_haskeywords = True
self.def_vals = {}
def __repr__(self):
return "<lmfit.ExpressionModel('%s')>" % (self.expr)
def _parse_params(self):
"""ExpressionModel._parse_params is over-written (as `pass`)
to prevent normal parsing of function for parameter names
"""
pass
| apache-2.0 |
kashif/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 73 | 1232 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
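    # Piecewise in the margin z = y * f(x): linear (-4 * z) for z < -1,
    # quadratic (1 - z) ** 2 for -1 <= z < 1, and zero once z >= 1.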
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
lw = 2
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color='gold', lw=lw,
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color='teal', lw=lw,
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), color='yellowgreen', lw=lw,
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), color='cornflowerblue', lw=lw,
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, color='orange', lw=lw,
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), color='darkorchid', lw=lw,
linestyle='--', label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/frame/test_axis_select_reindex.py | 1 | 44950 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange, lzip, u
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, compat, date_range,
isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestDataFrameSelectReindex(TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
assert obj.index.name == 'first'
assert obj.columns.name == 'second'
assert list(df.columns) == ['d', 'e', 'f']
pytest.raises(KeyError, df.drop, ['g'])
pytest.raises(KeyError, df.drop, ['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
# GH 16398
dropped = df.drop([], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.loc[[1, 2], :])
pytest.raises(KeyError, simple.drop, 5)
pytest.raises(KeyError, simple.drop, 'C', 1)
pytest.raises(KeyError, simple.drop, [1, 5])
pytest.raises(KeyError, simple.drop, ['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.loc[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's (GH12392)
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.drop('a')
res2 = df.drop(index='a')
tm.assert_frame_equal(res1, res2)
res1 = df.drop('d', 1)
res2 = df.drop(columns='d')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(labels='e', axis=1)
res2 = df.drop(columns='e')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0)
res2 = df.drop(index=['a'])
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
res2 = df.drop(index=['a'], columns=['d'])
tm.assert_frame_equal(res1, res2)
with pytest.raises(ValueError):
df.drop(labels='a', index='b')
with pytest.raises(ValueError):
df.drop(labels='a', columns='b')
with pytest.raises(ValueError):
df.drop(axis=1)
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in compat.iteritems(newFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(newFrame):
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in compat.iteritems(nonContigFrame[col]):
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in compat.iteritems(nonContigFrame):
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
assert newFrame.index is self.frame.index
# length zero
newFrame = self.frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(self.frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
assert len(newFrame.index) == len(self.frame.index)
assert len(newFrame.columns) == len(self.frame.columns)
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
tm.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
assert result is not self.frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(np.random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
assert df.index.name == 'iname'
df = df.reindex(Index(np.arange(10), name='tmpname'))
assert df.index.name == 'tmpname'
s = Series(np.random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
assert df.columns.name == 'iname'
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
assert smaller['A'].dtype == np.int64
bigger = smaller.reindex(self.intframe.index)
assert bigger['A'].dtype == np.float64
smaller = self.intframe.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
tm.assert_series_equal(new_frame['B'], self.frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
new_frame = self.frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method='ffill')
expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method='bfill')
expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(lrange(15))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=lrange(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=lrange(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(lrange(15), fill_value=0., axis=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(lrange(5), fill_value=0., axis=1)
expected = df.reindex(columns=lrange(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(lrange(15), fill_value=0)
expected = df.reindex(lrange(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
pytest.raises(ValueError, df.reindex, index=list(range(len(df))))
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]},
index=[0, 1, 3])
result = df.reindex([0, 1, 3])
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis='index')
assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5],
"C": [np.nan, np.nan]})
with tm.assert_produces_warning(FutureWarning):
result = df.reindex([0, 1], ['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]})
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis=1)
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis='columns')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis='columns')
with pytest.raises(TypeError, match='Cannot specify all'):
df.reindex([0, 1], [0], ['A'])
# Mixing styles
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=['A'])
expected = pd.DataFrame({"A": [1, 2]})
assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
with tm.assert_produces_warning(FutureWarning) as m:
res1 = df.reindex(['b', 'a'], ['e', 'd'])
assert 'reindex' in str(m[0].message)
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_align(self):
af, bf = self.frame.align(self.frame)
assert af._data is not self.frame._data
af, bf = self.frame.align(self.frame, copy=False)
assert af._data is self.frame._data
# axis = 0
other = self.frame.iloc[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.iloc[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, self.frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
self.frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
assert isinstance(right, Series)
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {c: s for c in self.frame.columns}
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
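        # Build the expected frames by reindexing both inputs onto the joined
        # labels for the requested axis (or axes) and applying the same
        # fillna method/limit, then compare with DataFrame.align's output.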
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
@pytest.mark.parametrize('meth', ['pad', 'bfill'])
@pytest.mark.parametrize('ax', [0, 1, None])
@pytest.mark.parametrize('fax', [0, 1])
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
def test_align_fill_method(self, how, meth, ax, fax):
self._check_align_fill(how, meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.iloc[0:4, :10]
right = self.frame.iloc[2:, 6:]
empty = self.frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# Items
filtered = self.frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
assert len(filtered.columns) == 2
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
# pass in None
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter()
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter(items=None)
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter(axis=1)
# test mutually exclusive arguments
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$')
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi')
# objects
filtered = self.mixed_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': u('\u2202')})
filtered = df.filter(like='C')
assert 'C' in filtered
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
@pytest.mark.parametrize('name,expected', [
('a', DataFrame({u'a': [1, 2]})),
(u'a', DataFrame({u'a': [1, 2]})),
(u'あ', DataFrame({u'あ': [3, 4]}))
])
def test_filter_unicode(self, name, expected):
# GH13101
df = DataFrame({u'a': [1, 2], u'あ': [3, 4]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
@pytest.mark.parametrize('name', ['a', u'a'])
def test_filter_bytestring(self, name):
# GH13101
df = DataFrame({b'a': [1, 2], b'b': [3, 4]})
expected = DataFrame({b'a': [1, 2]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_select(self):
# deprecated: gh-12410
f = lambda x: x.weekday() == 2
index = self.tsframe.index[[f(x) for x in self.tsframe.index]]
expected_weekdays = self.tsframe.reindex(index=index)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.tsframe.select(f, axis=0)
assert_frame_equal(result, expected_weekdays)
result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# replacement
f = lambda x: x.weekday == 2
result = self.tsframe.loc(axis=0)[f(self.tsframe.index)]
assert_frame_equal(result, expected_weekdays)
crit = lambda x: x in ['B', 'D']
result = self.frame.loc(axis=1)[(self.frame.columns.map(crit))]
expected = self.frame.reindex(columns=['B', 'D'])
assert_frame_equal(result, expected, check_names=False)
# doc example
df = DataFrame({'A': [1, 2, 3]}, index=['foo', 'bar', 'baz'])
crit = lambda x: x in ['bar', 'baz']
with tm.assert_produces_warning(FutureWarning):
expected = df.select(crit)
result = df.loc[df.index.map(crit)]
assert_frame_equal(result, expected, check_names=False)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# negative indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=True, axis=0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=False, axis=0)
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
pytest.raises(IndexError, df.take, [3, 1, 2, 30], axis=0)
pytest.raises(IndexError, df.take, [3, 1, 2, -31], axis=0)
pytest.raises(IndexError, df.take, [3, 1, 2, 5], axis=1)
pytest.raises(IndexError, df.take, [3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=lrange(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
pytest.raises(ValueError, self.intframe.reindex_axis, rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
newFrame = self.frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(index=lrange(4), columns=lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=lrange(4), columns=lrange(4))
expected = df.reindex(lrange(4)).reindex(columns=lrange(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=lrange(2), columns=lrange(2))
expected = df.reindex(lrange(2)).reindex(columns=lrange(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_reindex_multi_categorical_time(self):
# https://github.com/pandas-dev/pandas/issues/21390
midx = pd.MultiIndex.from_product(
[Categorical(['a', 'b', 'c']),
Categorical(date_range("2012-01-01", periods=3, freq='H'))])
df = pd.DataFrame({'a': range(len(midx))}, index=midx)
df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
result = df2.reindex(midx)
expected = pd.DataFrame(
{'a': [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
assert_frame_equal(result, expected)
data = [[1, 2, 3], [1, 2, 3]]
@pytest.mark.parametrize('actual', [
DataFrame(data=data, index=['a', 'a']),
DataFrame(data=data, index=['a', 'b']),
DataFrame(data=data, index=['a', 'b']).set_index([0, 1]),
DataFrame(data=data, index=['a', 'a']).set_index([0, 1])
])
def test_raise_on_drop_duplicate_index(self, actual):
# issue 19186
level = 0 if isinstance(actual.index, MultiIndex) else None
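        # dropping a label that is not present raises KeyError, while
        # errors='ignore' returns the frame unchanged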
with pytest.raises(KeyError):
actual.drop('c', level=level, axis=0)
with pytest.raises(KeyError):
actual.T.drop('c', level=level, axis=1)
expected_no_err = actual.drop('c', axis=0, level=level,
errors='ignore')
assert_frame_equal(expected_no_err, actual)
expected_no_err = actual.T.drop('c', axis=1, level=level,
errors='ignore')
assert_frame_equal(expected_no_err.T, actual)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 2]])
@pytest.mark.parametrize('drop_labels', [[], [1], [2]])
def test_drop_empty_list(self, index, drop_labels):
# GH 21494
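        # dropping an empty list, or labels that all exist, must not raise and
        # should leave exactly the non-dropped labels behind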
expected_index = [i for i in index if i not in drop_labels]
frame = pd.DataFrame(index=index).drop(drop_labels)
tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index))
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 2, 2]])
@pytest.mark.parametrize('drop_labels', [[1, 4], [4, 5]])
def test_drop_non_empty_list(self, index, drop_labels):
# GH 21494
with pytest.raises(KeyError, match='not found in axis'):
pd.DataFrame(index=index).drop(drop_labels)
| bsd-3-clause |
ssaeger/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
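    # map every token that starts with a digit (or an underscore) to a single
    # '#NUMBER' placeholder so that all of them share one vocabulary entry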
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
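# the true newsgroup labels are kept only to score the clusterings with V-measure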
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
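# Vectorize the corpus once, then cluster it with both algorithms and compare
# them with the V-measure against the true newsgroup labels.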
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
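# Normalized cut of bicluster i: the weight of the matrix entries linking the
# bicluster's rows and columns to the rest of the matrix, divided by the weight
# inside the bicluster. Lower values indicate better separated biclusters.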
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
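# Rank all biclusters by their normalized cut and keep the five smallest.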
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
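    # score each word by how much of its weight falls inside the bicluster's
    # documents compared to the documents outside of it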
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |