repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
thientu/scikit-learn
|
sklearn/linear_model/least_angle.py
|
61
|
54324
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False, positive=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
In the case of method='lar', the objective function is only known in
the form of an implicit equation (see the discussion in [1]).
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
Xy : array-like, shape (n_features,), optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
positive : boolean (default=False)
Restrict coefficients to be >= 0.
When using this option together with method 'lasso' the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha (nor will they when using method 'lar').
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent lasso_path function.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
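Examples
--------
A minimal usage sketch on synthetic data; the random data and the shape
check below are illustrative only:
>>> import numpy as np
>>> from sklearn.linear_model import lars_path
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(20, 5)
>>> y = rng.randn(20)
>>> alphas, active, coefs = lars_path(X, y, method='lasso')
>>> coefs.shape == (X.shape[1], len(alphas))
True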
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
# even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force a copy. Setting the array to be Fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and makes it easy to swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration all alphas are zero; the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap only works in place if the matrix is Fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
# bringing in too much numerical error, which is greater than
# the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
# correlation between each inactive variable and the
# equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# with a huge number of features, this takes 50% of the time; I
# think it could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not of length 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not of length 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept, which is set to True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.positive = positive
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_features,) or (n_features, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept, which is set to True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, positive=False,
precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,
positive=False):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept, which is set to True by default.
See the reservations about using this option in combination with method
'lasso' for expected small values of alpha in the documentation of
LassoLarsCV and LassoLarsIC.
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept, which is set to True by default.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
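Examples
--------
A minimal usage sketch with synthetic data; the data, the ``cv=5`` choice
and the shape check below are illustrative only:
>>> import numpy as np
>>> from sklearn.linear_model import LarsCV
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(60, 8)
>>> y = X[:, 0] + 0.1 * rng.randn(60)
>>> model = LarsCV(cv=5).fit(X, y)
>>> model.coef_.shape == (X.shape[1],)
True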
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
# it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept, which is set to True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile on heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
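Examples
--------
A minimal usage sketch on synthetic data with a sparse underlying signal;
the data and the checks below are illustrative only:
>>> import numpy as np
>>> from sklearn.linear_model import LassoLarsCV
>>> rng = np.random.RandomState(42)
>>> X = rng.randn(50, 10)
>>> y = X[:, 0] - 2 * X[:, 3] + 0.1 * rng.randn(50)
>>> model = LassoLarsCV(cv=4).fit(X, y)
>>> float(model.alpha_) >= 0.0
True
>>> model.coef_.shape == (X.shape[1],)
True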
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
positive : boolean (default=False)
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept, which is set to True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, positive=False, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
# Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
|
bsd-3-clause
|
korotkyn/ibis
|
ibis/util.py
|
6
|
3963
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import ibis.compat as compat
def guid():
try:
from ibis.comms import uuid4_hex
return uuid4_hex()
except ImportError:
from uuid import uuid4
guid = uuid4()
return guid.hex if compat.PY3 else guid.get_hex()
def bytes_to_uint8_array(val, width=70):
"""
Formats a byte string for use as a uint8_t* literal in C/C++
"""
if len(val) == 0:
return '{}'
lines = []
line = '{' + str(ord(val[0]))
for x in val[1:]:
token = str(ord(x))
if len(line) + len(token) > width:
lines.append(line + ',')
line = token
else:
line += ',%s' % token
lines.append(line)
return '\n'.join(lines) + '}'
def unique_by_key(values, key):
id_to_table = {}
for x in values:
id_to_table[key(x)] = x
return compat.dict_values(id_to_table)
def indent(text, spaces):
block = ' ' * spaces
return '\n'.join(block + x for x in text.split('\n'))
def any_of(values, t):
for x in values:
if isinstance(x, t):
return True
return False
def all_of(values, t):
for x in values:
if not isinstance(x, t):
return False
return True
def promote_list(val):
if not isinstance(val, list):
val = [val]
return val
class IbisSet(object):
def __init__(self, keys=None):
self.keys = keys or []
@classmethod
def from_list(cls, keys):
return IbisSet(keys)
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def add(self, obj):
self.keys.append(obj)
class IbisMap(object):
def __init__(self):
self.keys = []
self.values = []
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def set(self, key, value):
self.keys.append(key)
self.values.append(value)
def get(self, key):
for k, v in zip(self.keys, self.values):
if key.equals(k):
return v
raise KeyError(key)
def is_function(v):
return isinstance(v, (types.FunctionType, types.LambdaType))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Brought over from pandas.
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.unicode_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def deprecate(f, message):
def g(*args, **kwargs):
print(message)
return f(*args, **kwargs)
return g
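# A hedged usage sketch of the helpers above: the inputs are made-up
# strings and values, not real ibis objects, and the block is guarded so
# it never runs on import.
if __name__ == '__main__':
    # indent(): prefix every line of a block with two spaces
    print(indent('select *\nfrom t', 2))
    # adjoin(): pad the first column so the two columns line up
    print(adjoin(3, ['a', 'bb', 'ccc'], ['x', 'y', 'z']))
    # any_of()/all_of(): isinstance checks over a collection
    print([any_of([1, 'a'], str), all_of([1, 2], int)])
    # promote_list(): wrap a scalar value into a one-element list
    print(promote_list('col'))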
|
apache-2.0
|
harisbal/pandas
|
pandas/tests/series/test_analytics.py
|
1
|
82686
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from distutils.version import LooseVersion
from itertools import product
import operator
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY35, lrange, range
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, Series, bdate_range, compat,
date_range, isna, notna)
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.indexes.timedeltas import Timedelta
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_index_equal,
assert_series_equal)
class TestSeriesAnalytics():
@pytest.mark.parametrize("use_bottleneck", [True, False])
@pytest.mark.parametrize("method, unit", [
("sum", 0.0),
("prod", 1.0)
])
def test_empty(self, method, unit, use_bottleneck):
with pd.option_context("use_bottleneck", use_bottleneck):
# GH 9422 / 18921
# Entirely empty
s = Series([])
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
assert result == unit
# Skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert isna(result)
# All-NA
s = Series([np.nan])
# NA by default
result = getattr(s, method)()
assert result == unit
# Explicit
result = getattr(s, method)(min_count=0)
assert result == unit
result = getattr(s, method)(min_count=1)
assert isna(result)
# Skipna, default
result = getattr(s, method)(skipna=True)
assert result == unit
# skipna, explicit
result = getattr(s, method)(skipna=True, min_count=0)
assert result == unit
result = getattr(s, method)(skipna=True, min_count=1)
assert isna(result)
# Mix of valid, empty
s = Series([np.nan, 1])
# Default
result = getattr(s, method)()
assert result == 1.0
# Explicit
result = getattr(s, method)(min_count=0)
assert result == 1.0
result = getattr(s, method)(min_count=1)
assert result == 1.0
# Skipna
result = getattr(s, method)(skipna=True)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=0)
assert result == 1.0
result = getattr(s, method)(skipna=True, min_count=1)
assert result == 1.0
# GH #844 (changed in 9422)
df = DataFrame(np.empty((10, 0)))
assert (getattr(df, method)(1) == unit).all()
s = pd.Series([1])
result = getattr(s, method)(min_count=2)
assert isna(result)
s = pd.Series([np.nan])
result = getattr(s, method)(min_count=2)
assert isna(result)
s = pd.Series([np.nan, 1])
result = getattr(s, method)(min_count=2)
assert isna(result)
@pytest.mark.parametrize('method, unit', [
('sum', 0.0),
('prod', 1.0),
])
def test_empty_multi(self, method, unit):
s = pd.Series([1, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)]))
# 1 / 0 by default
result = getattr(s, method)(level=0)
expected = pd.Series([1, unit], index=['a', 'b'])
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(s, method)(level=0, min_count=0)
expected = pd.Series([1, unit], index=['a', 'b'])
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(s, method)(level=0, min_count=1)
expected = pd.Series([1, np.nan], index=['a', 'b'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method", ['mean', 'median', 'std', 'var'])
def test_ops_consistency_on_empty(self, method):
# GH 7869
# consistency on empty
# float
result = getattr(Series(dtype=float), method)()
assert isna(result)
# timedelta64[ns]
result = getattr(Series(dtype='m8[ns]'), method)()
assert result is pd.NaT
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
@pytest.mark.parametrize("use_bottleneck", [True, False])
def test_sum_overflow(self, use_bottleneck):
with pd.option_context('use_bottleneck', use_bottleneck):
# GH 6915
# overflowing on the smaller int dtypes
for dtype in ['int32', 'int64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert int(result) == v.sum(dtype='int64')
result = s.min(skipna=False)
assert int(result) == 0
result = s.max(skipna=False)
assert int(result) == v[-1]
for dtype in ['float32', 'float64']:
v = np.arange(5000000, dtype=dtype)
s = Series(v)
result = s.sum(skipna=False)
assert result == v.sum(dtype=dtype)
result = s.min(skipna=False)
assert np.allclose(float(result), 0.0)
result = s.max(skipna=False)
assert np.allclose(float(result), v[-1])
def test_sum(self, string_series):
self._check_stat_op('sum', np.sum, string_series, check_allna=False)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert np.isinf(s.sum())
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with pd.option_context("mode.use_inf_as_na", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
assert np.isinf(res).all()
def test_mean(self, string_series):
self._check_stat_op('mean', np.mean, string_series)
def test_median(self, string_series):
self._check_stat_op('median', np.median, string_series)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
tm.assert_almost_equal(np.median(int_ts), int_ts.median())
def test_prod(self, string_series):
self._check_stat_op('prod', np.prod, string_series)
def test_min(self, string_series):
self._check_stat_op('min', np.min, string_series, check_objects=True)
def test_max(self, string_series):
self._check_stat_op('max', np.max, string_series, check_objects=True)
def test_var_std(self, datetime_series, string_series):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt, string_series)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt, string_series)
result = datetime_series.std(ddof=4)
expected = np.std(datetime_series.values, ddof=4)
assert_almost_equal(result, expected)
result = datetime_series.var(ddof=4)
expected = np.var(datetime_series.values, ddof=4)
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = datetime_series.iloc[[0]]
result = s.var(ddof=1)
assert isna(result)
result = s.std(ddof=1)
assert isna(result)
def test_sem(self, datetime_series, string_series):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt, string_series)
result = datetime_series.sem(ddof=4)
expected = np.std(datetime_series.values,
ddof=4) / np.sqrt(len(datetime_series.values))
assert_almost_equal(result, expected)
# 1 - element series with ddof=1
s = datetime_series.iloc[[0]]
result = s.sem(ddof=1)
assert isna(result)
@td.skip_if_no_scipy
def test_skew(self, string_series):
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt, string_series)
# test corner cases; skew() returns NaN unless there are at least 3
# values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.skew())
assert np.isnan(df.skew()).all()
else:
assert 0 == s.skew()
assert (df.skew() == 0).all()
@td.skip_if_no_scipy
def test_kurt(self, string_series):
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt, string_series)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
tm.assert_almost_equal(s.kurt(), s.kurt(level=0)['bar'])
# test corner cases; kurt() returns NaN unless there are at least 4
# values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
assert np.isnan(s.kurt())
assert np.isnan(df.kurt()).all()
else:
assert 0 == s.kurt()
assert (df.kurt() == 0).all()
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
result = s.describe()
expected = Series([5, 2, s.std(), 0, 1, 2, 3, 4],
name='int_data',
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name='bool_data')
result = s.describe()
expected = Series([5, 2, False, 3], name='bool_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
s = Series(['a', 'a', 'b', 'c', 'd'], name='str_data')
result = s.describe()
expected = Series([5, 4, 'a', 2], name='str_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
def test_describe_with_tz(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
name = tz_naive_fixture
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
result = s.describe()
expected = Series(
[5, 5, s.value_counts().index[0], 1, start.tz_localize(tz),
end.tz_localize(tz)
],
name=name,
index=['count', 'unique', 'top', 'freq', 'first', 'last']
)
tm.assert_series_equal(result, expected)
def test_argsort(self, datetime_series):
self._check_accum_op('argsort', datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
assert s.dtype == 'datetime64[ns]'
shifted = s.shift(-1)
assert shifted.dtype == 'datetime64[ns]'
assert isna(shifted[4])
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
tm.assert_series_equal(mindexer, Series(mexpected),
check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected),
check_dtype=False)
pytest.raises(AssertionError, tm.assert_numpy_array_equal,
qindexer, mindexer)
def test_cumsum(self, datetime_series):
self._check_accum_op('cumsum', datetime_series)
def test_cumprod(self, datetime_series):
self._check_accum_op('cumprod', datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(datetime_series.cummin().values,
np.minimum
.accumulate(np.array(datetime_series)))
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(datetime_series.cummax().values,
np.maximum
.accumulate(np.array(datetime_series)))
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1'
]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3'
]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'1 min', ]))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'1 min',
'1 min',
'1 min', ]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'2 min',
'NaT',
'3 min', ]))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'2 min',
'2 min',
'3 min', ]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_npdiff(self):
pytest.skip("skipping due to Series no longer being an "
"ndarray")
        # no longer works as the return type of np.diff is now np.ndarray
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, string_series_,
check_objects=False, check_allna=False):
with pd.option_context('use_bottleneck', False):
f = getattr(Series, name)
# add some NaNs
string_series_[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max', 'min']:
ds = Series(date_range('1/1/2001', periods=10))
pytest.raises(TypeError, f, ds)
# skipna or no
assert notna(f(string_series_))
assert isna(f(string_series_, skipna=False))
# check the result is correct
nona = string_series_.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(string_series_), alternate(nona.values))
allna = string_series_ * nan
if check_allna:
assert np.isnan(f(allna))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# 2888
items = [0]
items.extend(lrange(2 ** 40, 2 ** 40 + 1000))
s = Series(items, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
assert res == exp
# check on string data
if name not in ['sum', 'min', 'max']:
pytest.raises(TypeError, f, Series(list('abc')))
# Invalid axis.
pytest.raises(ValueError, f, string_series_, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in compat.signature(f).args:
tm.assert_raises_regex(NotImplementedError, name, f,
string_series_, numeric_only=True)
def _check_accum_op(self, name, datetime_series_, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(func(datetime_series_).values,
func(np.array(datetime_series_)),
check_dtype=check_dtype)
# with missing values
ts = datetime_series_.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected,
check_dtype=False)
def test_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
with tm.assert_produces_warning(FutureWarning):
result = s.compress(cond)
tm.assert_series_equal(result, expected)
def test_numpy_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(np.compress(cond, s), expected)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.compress,
cond, s, axis=1)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.compress,
cond, s, out=s)
def test_round(self, datetime_series):
datetime_series.index.name = "index_name"
result = datetime_series.round(2)
expected = Series(np.round(datetime_series.values, 2),
index=datetime_series.index, name='ts')
assert_series_equal(result, expected)
assert result.name == datetime_series.name
def test_numpy_round(self):
# See gh-12600
s = Series([1.53, 1.36, 0.06])
out = np.round(s, decimals=0)
expected = Series([2., 1., 0.])
assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assert_raises_regex(ValueError, msg):
np.round(s, decimals=0, out=s)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip(
'build in round cannot be overridden prior to Python 3')
s = Series([1.123, 2.123, 3.123], index=lrange(3))
result = round(s)
expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
result = round(s, decimals)
tm.assert_series_equal(result, expected_rounded)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
assert not isinstance(result, Series)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
assert not bool_series.all()
assert bool_series.any()
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
assert 'abc' == s.any() # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
assert s1.all(skipna=False) # nan && True => True
assert s1.all(skipna=True)
assert np.isnan(s2.any(skipna=False)) # nan || False => nan
assert not s2.any(skipna=True)
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
pytest.raises(NotImplementedError, s.any, bool_only=True, level=0)
pytest.raises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
pytest.raises(NotImplementedError, s.any, bool_only=True)
pytest.raises(NotImplementedError, s.all, bool_only=True)
def test_modulo(self):
with np.errstate(all='ignore'):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
assert not result.equals(result2)
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corr(self, datetime_series):
import scipy.stats as stats
# full overlap
tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]),
1)
assert isna(datetime_series[:15].corr(datetime_series[5:],
min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.corr(ts2, min_periods=12))
# No overlap
assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.corr(cp))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
tm.assert_almost_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
# these methods got rewritten in 0.8
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
pytest.skip("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series(
[-0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -
0.06430576, -2.09704447, 0.40660407, -0.89926396, 0.94209606])
B = Series(
[-1.01270225, -0.62210117, -1.56895827, 0.59592943, -0.01680292,
1.17258718, -1.06009347, -0.10222060, -0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
tm.assert_almost_equal(A.corr(B, method='kendall'), kexp)
tm.assert_almost_equal(A.corr(B, method='spearman'), sexp)
def test_corr_invalid_method(self):
# GH PR #22298
s1 = pd.Series(np.random.randn(10))
s2 = pd.Series(np.random.randn(10))
msg = ("method must be either 'pearson', 'spearman', "
"or 'kendall'")
with tm.assert_raises_regex(ValueError, msg):
s1.corr(s2, method="____")
def test_corr_callable_method(self, datetime_series):
# simple correlation example
# returns 1 if exact equality, 0 otherwise
my_corr = lambda a, b: 1. if (a == b).all() else 0.
# simple example
s1 = Series([1, 2, 3, 4, 5])
s2 = Series([5, 4, 3, 2, 1])
expected = 0
tm.assert_almost_equal(
s1.corr(s2, method=my_corr),
expected)
# full overlap
tm.assert_almost_equal(datetime_series.corr(
datetime_series, method=my_corr), 1.)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].corr(
datetime_series[5:], method=my_corr), 1.)
# No overlap
assert np.isnan(datetime_series[::2].corr(
datetime_series[1::2], method=my_corr))
# dataframe example
df = pd.DataFrame([s1, s2])
expected = pd.DataFrame([
{0: 1., 1: 0}, {0: 0, 1: 1.}])
tm.assert_almost_equal(
df.transpose().corr(method=my_corr), expected)
def test_cov(self, datetime_series):
# full overlap
tm.assert_almost_equal(datetime_series.cov(datetime_series),
datetime_series.std() ** 2)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].cov(datetime_series[5:]),
datetime_series[5:15].std() ** 2)
# No overlap
assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.cov(cp))
# min_periods
assert isna(datetime_series[:15].cov(datetime_series[5:],
min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.cov(ts2, min_periods=12))
def test_count(self, datetime_series):
assert datetime_series.count() == len(datetime_series)
datetime_series[::2] = np.NaN
assert datetime_series.count() == np.isfinite(datetime_series).sum()
mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
        result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
assert np.all(result == expected.values)
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
pytest.raises(Exception, a.dot, a.values[:3])
pytest.raises(ValueError, a.dot, b.T)
@pytest.mark.skipif(not PY35,
reason='matmul supported for Python>=3.5')
def test_matmul(self):
# matmul test is for GH #10259
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
# Series @ DataFrame
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# Series @ Series
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# vector (1D np.array) @ Series (__rmatmul__)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# vector (1D list) @ Series (__rmatmul__)
result = operator.matmul(a.values.tolist(), a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# matrix (2D np.array) @ Series (__rmatmul__)
result = operator.matmul(b.T.values, a)
expected = np.dot(b.T.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# matrix (2D nested lists) @ Series (__rmatmul__)
result = operator.matmul(b.T.values.tolist(), a)
expected = np.dot(b.T.values, a.values)
assert_almost_equal(result, expected)
# mixed dtype DataFrame @ Series
a['p'] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# different dtypes DataFrame @ Series
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
pytest.raises(Exception, a.dot, a.values[:3])
pytest.raises(ValueError, a.dot, b.T)
def test_clip(self, datetime_series):
val = datetime_series.median()
assert datetime_series.clip_lower(val).min() == val
assert datetime_series.clip_upper(val).max() == val
assert datetime_series.clip(lower=val).min() == val
assert datetime_series.clip(upper=val).max() == val
result = datetime_series.clip(-0.5, 0.5)
expected = np.clip(datetime_series, -0.5, 0.5)
assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime(
[np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
lower = s.clip_lower(thresh)
upper = s.clip_upper(thresh)
assert lower[notna(lower)].min() == thresh
assert upper[notna(upper)].max() == thresh
assert list(isna(s)) == list(isna(lower))
assert list(isna(s)) == list(isna(upper))
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH # 17276
s = Series([1, 2, 3])
assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
assert_series_equal(s.clip(upper=np.nan, lower=np.nan),
Series([1, 2, 3]))
# GH #19992
assert_series_equal(s.clip(lower=[0, 4, np.nan]),
Series([1, 4, np.nan]))
assert_series_equal(s.clip(upper=[1, np.nan, 1]),
Series([1, np.nan, 1]))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH #15390
original = pd.Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = pd.Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
s = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:31:00')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
s = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:31:00', tz='US/Eastern')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:30:30', tz='US/Eastern')])
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum,
'cumprod': np.cumprod,
'cummin': cummin,
'cummax': cummax}
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse,
'cumprod': cpe,
'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
# GH: 16012
        # This specific issue requires a Series longer than 1e6 elements, and
        # the comparison array (in_list) must be large enough that numpy does
        # not use a manual masking trick that would avoid the issue entirely
s = Series(list('abcdefghijk' * 10 ** 5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, 'a', 'b', 'G', 'Y', 'Z', 'E',
'K', 'E', 'S', 'I', 'R', 'R'] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with pytest.raises(TypeError):
s.isin('a')
with pytest.raises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range('jan-01-2013', 'jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5), unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_timedelta64_analytics(self):
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
assert result == 0
result = td.idxmax()
assert result == 2
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
assert result == 1
result = td.idxmax()
assert result == 2
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
# result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
assert result == expected
result = td.min()
expected = Timedelta('1 days')
assert result == expected
def test_idxmin(self, string_series):
# test idxmin
# _check_stat_op approach can not be used here because of isna check.
# add some NaNs
string_series[5:15] = np.NaN
# skipna or no
assert string_series[string_series.idxmin()] == string_series.min()
assert isna(string_series.idxmin(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmin()] == nona.min()
assert (nona.index.values.tolist().index(nona.idxmin()) ==
nona.values.argmin())
# all NaNs
allna = string_series * nan
assert isna(allna.idxmin())
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
assert result == 0
s[0] = np.nan
result = s.idxmin()
assert result == 1
def test_numpy_argmin_deprecated(self):
# See gh-16830
data = np.arange(1, 11)
s = Series(data, index=data)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# The deprecation of Series.argmin also causes a deprecation
# warning when calling np.argmin. This behavior is temporary
# until the implementation of Series.argmin is corrected.
result = np.argmin(s)
assert result == 1
with tm.assert_produces_warning(FutureWarning):
# argmin is aliased to idxmin
result = s.argmin()
assert result == 1
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argmin,
s, out=data)
def test_idxmax(self, string_series):
# test idxmax
# _check_stat_op approach can not be used here because of isna check.
# add some NaNs
string_series[5:15] = np.NaN
# skipna or no
assert string_series[string_series.idxmax()] == string_series.max()
assert isna(string_series.idxmax(skipna=False))
# no NaNs
nona = string_series.dropna()
assert nona[nona.idxmax()] == nona.max()
assert (nona.index.values.tolist().index(nona.idxmax()) ==
nona.values.argmax())
# all NaNs
allna = string_series * nan
assert isna(allna.idxmax())
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
assert result == 5
s[5] = np.nan
result = s.idxmax()
assert result == 4
# Float64Index
# GH 5914
s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
s = pd.Series(s.index, s.index)
result = s.idxmax()
assert result == 3.1
result = s.idxmin()
assert result == 1.1
def test_numpy_argmax_deprecated(self):
# See gh-16830
data = np.arange(1, 11)
s = Series(data, index=data)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# The deprecation of Series.argmax also causes a deprecation
# warning when calling np.argmax. This behavior is temporary
# until the implementation of Series.argmax is corrected.
result = np.argmax(s)
assert result == 10
with tm.assert_produces_warning(FutureWarning):
# argmax is aliased to idxmax
result = s.argmax()
assert result == 10
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.argmax,
s, out=data)
def test_ptp(self):
# GH21614
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert np.ptp(ser) == np.ptp(arr)
# GH11163
s = Series([3, 5, np.nan, -3, 10])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert s.ptp() == 13
assert pd.isna(s.ptp(skipna=False))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0, skipna=False), expected)
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
with pytest.raises(TypeError):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s.ptp()
with pytest.raises(NotImplementedError):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s.ptp(numeric_only=True)
    def test_empty_timeseries_reductions_return_nat(self):
# covers #11245
for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
assert Series([], dtype=dtype).min() is pd.NaT
assert Series([], dtype=dtype).max() is pd.NaT
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name='x')
expected = Series(s.values.repeat(2), name='x',
index=s.index.values.repeat(2))
assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.repeat, s, 2, axis=0)
def test_searchsorted(self):
s = Series([1, 2, 3])
idx = s.searchsorted(1, side='left')
tm.assert_numpy_array_equal(idx, np.array([0], dtype=np.intp))
idx = s.searchsorted(1, side='right')
tm.assert_numpy_array_equal(idx, np.array([1], dtype=np.intp))
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
assert r == e
r = s.searchsorted([30])
e = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
assert r == e
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing is True
s = Series(pd.date_range('20130101', periods=10))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(list(reversed(s.tolist())))
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level='A')
assert_series_equal(backwards, res)
res = s.sort_index(level=['A', 'B'])
assert_series_equal(backwards, res)
res = s.sort_index(level='A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sort_index(level=['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_apply_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories are
# the same
values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = s.apply(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
def test_shift_int(self, datetime_series):
ts = datetime_series.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_unstack(self):
from numpy import nan
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values},
index=exp_index).sort_index(level=0)
unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'
], ['a', 'a', 'b', 'a', 'b'],
[1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_value_counts_datetime(self):
# most dtypes are tested in test_base.py
values = [pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 11:00')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'])
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_datetime_tz(self):
values = [pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'], tz='US/Eastern')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_period(self):
values = [pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-03', freq='M')]
exp_idx = pd.PeriodIndex(['2011-01', '2011-03', '2011-02'], freq='M')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.PeriodIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_ordered(self):
# most dtypes are tested in test_base.py
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=True)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=False)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
main_dtypes = [
'datetime',
'datetimetz',
'timedelta',
'int8',
'int16',
'int32',
'int64',
'float32',
'float64',
'uint8',
'uint16',
'uint32',
'uint64'
]
@pytest.fixture
def s_main_dtypes():
"""A DataFrame with many dtypes
* datetime
* datetimetz
* timedelta
* [u]int{8,16,32,64}
* float{32,64}
The columns are the name of the dtype.
"""
df = pd.DataFrame(
{'datetime': pd.to_datetime(['2003', '2002',
'2001', '2002',
'2005']),
'datetimetz': pd.to_datetime(
['2003', '2002',
'2001', '2002',
'2005']).tz_localize('US/Eastern'),
'timedelta': pd.to_timedelta(['3d', '2d', '1d',
'2d', '5d'])})
for dtype in ['int8', 'int16', 'int32', 'int64',
'float32', 'float64',
'uint8', 'uint16', 'uint32', 'uint64']:
df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
return df
@pytest.fixture(params=main_dtypes)
def s_main_dtypes_split(request, s_main_dtypes):
"""Each series in s_main_dtypes."""
return s_main_dtypes[request.param]
class TestMode(object):
@pytest.mark.parametrize('dropna, expected', [
(True, Series([], dtype=np.float64)),
(False, Series([], dtype=np.float64))
])
def test_mode_empty(self, dropna, expected):
s = Series([], dtype=np.float64)
result = s.mode(dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, data, expected', [
(True, [1, 1, 1, 2], [1]),
(True, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
(False, [1, 1, 1, 2], [1]),
(False, [1, 1, 1, 2, 3, 3, 3], [1, 3]),
])
@pytest.mark.parametrize(
'dt',
list(np.typecodes['AllInteger'] + np.typecodes['Float'])
)
def test_mode_numerical(self, dropna, data, expected, dt):
s = Series(data, dtype=dt)
result = s.mode(dropna)
expected = Series(expected, dtype=dt)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, expected', [
(True, [1.0]),
(False, [1, np.nan]),
])
def test_mode_numerical_nan(self, dropna, expected):
s = Series([1, 1, 2, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, expected1, expected2, expected3', [
(True, ['b'], ['bar'], ['nan']),
(False, ['b'], [np.nan], ['nan'])
])
def test_mode_str_obj(self, dropna, expected1, expected2, expected3):
# Test string and object types.
data = ['a'] * 2 + ['b'] * 3
s = Series(data, dtype='c')
result = s.mode(dropna)
expected1 = Series(expected1, dtype='c')
tm.assert_series_equal(result, expected1)
data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
s = Series(data, dtype=object)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected2)
data = ['foo', 'bar', 'bar', np.nan, np.nan, np.nan]
s = Series(data, dtype=object).astype(str)
result = s.mode(dropna)
expected3 = Series(expected3, dtype=str)
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, ['foo'], ['foo']),
(False, ['foo'], [np.nan])
])
def test_mode_mixeddtype(self, dropna, expected1, expected2):
s = Series([1, 'foo', 'foo'])
result = s.mode(dropna)
expected = Series(expected1)
tm.assert_series_equal(result, expected)
s = Series([1, 'foo', 'foo', np.nan, np.nan, np.nan])
result = s.mode(dropna)
expected = Series(expected2, dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, ['1900-05-03', '2011-01-03', '2013-01-02'],
['2011-01-03', '2013-01-02']),
(False, [np.nan], [np.nan, '2011-01-03', '2013-01-02']),
])
def test_mode_datetime(self, dropna, expected1, expected2):
s = Series(['2011-01-03', '2013-01-02',
'1900-05-03', 'nan', 'nan'], dtype='M8[ns]')
result = s.mode(dropna)
expected1 = Series(expected1, dtype='M8[ns]')
tm.assert_series_equal(result, expected1)
s = Series(['2011-01-03', '2013-01-02', '1900-05-03',
'2011-01-03', '2013-01-02', 'nan', 'nan'],
dtype='M8[ns]')
result = s.mode(dropna)
expected2 = Series(expected2, dtype='M8[ns]')
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, ['-1 days', '0 days', '1 days'], ['2 min', '1 day']),
(False, [np.nan], [np.nan, '2 min', '1 day']),
])
def test_mode_timedelta(self, dropna, expected1, expected2):
# gh-5986: Test timedelta types.
s = Series(['1 days', '-1 days', '0 days', 'nan', 'nan'],
dtype='timedelta64[ns]')
result = s.mode(dropna)
expected1 = Series(expected1, dtype='timedelta64[ns]')
tm.assert_series_equal(result, expected1)
s = Series(['1 day', '1 day', '-1 day', '-1 day 2 min',
'2 min', '2 min', 'nan', 'nan'],
dtype='timedelta64[ns]')
result = s.mode(dropna)
expected2 = Series(expected2, dtype='timedelta64[ns]')
tm.assert_series_equal(result, expected2)
@pytest.mark.parametrize('dropna, expected1, expected2, expected3', [
(True, Categorical([1, 2], categories=[1, 2]),
Categorical(['a'], categories=[1, 'a']),
Categorical([3, 1], categories=[3, 2, 1], ordered=True)),
(False, Categorical([np.nan], categories=[1, 2]),
Categorical([np.nan, 'a'], categories=[1, 'a']),
Categorical([np.nan, 3, 1], categories=[3, 2, 1], ordered=True)),
])
def test_mode_category(self, dropna, expected1, expected2, expected3):
s = Series(Categorical([1, 2, np.nan, np.nan]))
result = s.mode(dropna)
expected1 = Series(expected1, dtype='category')
tm.assert_series_equal(result, expected1)
s = Series(Categorical([1, 'a', 'a', np.nan, np.nan]))
result = s.mode(dropna)
expected2 = Series(expected2, dtype='category')
tm.assert_series_equal(result, expected2)
s = Series(Categorical([1, 1, 2, 3, 3, np.nan, np.nan],
categories=[3, 2, 1], ordered=True))
result = s.mode(dropna)
expected3 = Series(expected3, dtype='category')
tm.assert_series_equal(result, expected3)
@pytest.mark.parametrize('dropna, expected1, expected2', [
(True, [2**63], [1, 2**63]),
(False, [2**63], [1, 2**63])
])
def test_mode_intoverflow(self, dropna, expected1, expected2):
# Test for uint64 overflow.
s = Series([1, 2**63, 2**63], dtype=np.uint64)
result = s.mode(dropna)
expected1 = Series(expected1, dtype=np.uint64)
tm.assert_series_equal(result, expected1)
s = Series([1, 2**63], dtype=np.uint64)
result = s.mode(dropna)
expected2 = Series(expected2, dtype=np.uint64)
tm.assert_series_equal(result, expected2)
@pytest.mark.skipif(not compat.PY3, reason="only PY3")
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
expected = Series(['foo', np.nan])
s = Series([1, 'foo', 'foo', np.nan, np.nan])
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = s.mode(dropna=False)
result = result.sort_values().reset_index(drop=True)
tm.assert_series_equal(result, expected)
def assert_check_nselect_boundary(vals, dtype, method):
# helper function for 'test_boundary_{dtype}' tests
s = Series(vals, dtype=dtype)
result = getattr(s, method)(3)
expected_idxr = [0, 1, 2] if method == 'nsmallest' else [3, 2, 1]
expected = s.loc[expected_idxr]
tm.assert_series_equal(result, expected)
class TestNLargestNSmallest(object):
@pytest.mark.parametrize(
"r", [Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
Series(list('abcde')),
Series(list('abcde'), dtype='category')])
def test_error(self, r):
dt = r.dtype
msg = ("Cannot use method 'n(larg|small)est' with "
"dtype {dt}".format(dt=dt))
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assert_raises_regex(TypeError, msg):
method(arg)
def test_nsmallest_nlargest(self, s_main_dtypes_split):
        # float, int, datetime64 (use i8), timedelta64 (same),
# object that are numbers, object that are strings
s = s_main_dtypes_split
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
def test_misc(self):
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with tm.assert_raises_regex(ValueError, msg):
s.nsmallest(keep='invalid')
with tm.assert_raises_regex(ValueError, msg):
s.nlargest(keep='invalid')
# GH 15297
s = Series([1] * 5, index=[1, 2, 3, 4, 5])
expected_first = Series([1] * 3, index=[1, 2, 3])
expected_last = Series([1] * 3, index=[5, 4, 3])
result = s.nsmallest(3)
assert_series_equal(result, expected_first)
result = s.nsmallest(3, keep='last')
assert_series_equal(result, expected_last)
result = s.nlargest(3)
assert_series_equal(result, expected_first)
result = s.nlargest(3, keep='last')
assert_series_equal(result, expected_last)
@pytest.mark.parametrize('n', range(1, 5))
def test_n(self, n):
# GH 13412
s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = s.nlargest(n)
expected = s.sort_values(ascending=False).head(n)
assert_series_equal(result, expected)
result = s.nsmallest(n)
expected = s.sort_values().head(n)
assert_series_equal(result, expected)
def test_boundary_integer(self, nselect_method, any_int_dtype):
# GH 21426
dtype_info = np.iinfo(any_int_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val, min_val + 1, max_val - 1, max_val]
assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
def test_boundary_float(self, nselect_method, float_dtype):
# GH 21426
dtype_info = np.finfo(float_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
min_2nd, max_2nd = np.nextafter(
[min_val, max_val], 0, dtype=float_dtype)
vals = [min_val, min_2nd, max_2nd, max_val]
assert_check_nselect_boundary(vals, float_dtype, nselect_method)
@pytest.mark.parametrize('dtype', ['datetime64[ns]', 'timedelta64[ns]'])
def test_boundary_datetimelike(self, nselect_method, dtype):
# GH 21426
# use int64 bounds and +1 to min_val since true minimum is NaT
# (include min_val/NaT at end to maintain same expected_idxr)
dtype_info = np.iinfo('int64')
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
assert_check_nselect_boundary(vals, dtype, nselect_method)
def test_duplicate_keep_all_ties(self):
# see gh-16818
s = Series([10, 9, 8, 7, 7, 7, 7, 6])
result = s.nlargest(4, keep='all')
expected = Series([10, 9, 8, 7, 7, 7, 7])
assert_series_equal(result, expected)
result = s.nsmallest(2, keep='all')
expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
assert_series_equal(result, expected)
class TestCategoricalSeriesAnalytics(object):
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
assert result == 2
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
pytest.raises(TypeError, lambda: cat.min())
pytest.raises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == "b"
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
assert np.isnan(_min)
assert _max == 1
def test_value_counts(self):
# GH 12835
cats = Categorical(list('abcccb'), categories=list('cabd'))
s = Series(cats, name='xxx')
res = s.value_counts(sort=False)
exp_index = CategoricalIndex(list('cabd'), categories=cats.categories)
exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp_index = CategoricalIndex(list('cbad'), categories=cats.categories)
exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
        # check that object dtype handles the Series.name the same way
        # (tested in test_base.py)
s = Series(["a", "b", "c", "c", "c", "b"], name='xxx')
res = s.value_counts()
exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# see gh-9443
# sanity check
s = Series(["a", "b", "a"], dtype="category")
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# same Series via two different constructions --> same behaviour
series = [
Series(["a", "b", None, "a", None, None], dtype="category"),
Series(Categorical(["a", "b", None, "a", None, None],
categories=["a", "b"]))
]
for s in series:
# None is a NaN value, so we exclude its count here
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# we don't exclude the count of None and sort by counts
exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
res = s.value_counts(dropna=False)
tm.assert_series_equal(res, exp)
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
res = s.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]",
pytest.param("datetime64[D]",
marks=pytest.mark.xfail(reason="GH#7996", strict=True))]
)
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array,
ordered=is_ordered))
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
tm.assert_series_equal(tc1.duplicated(keep='last'), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep='last'),
tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
tm.assert_series_equal(tc1.duplicated(keep=False), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(
input2, categories=cat_array, ordered=is_ordered)
)
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, False, False, False])
tm.assert_series_equal(tc2.duplicated(keep='last'), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep='last'),
tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(keep=False), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_bool(self, is_ordered):
tc = Series(Categorical([True, False, True, False],
categories=[True, False], ordered=is_ordered))
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, False, False])
tm.assert_series_equal(tc.duplicated(keep='last'), expected)
tm.assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, True, True])
tm.assert_series_equal(tc.duplicated(keep=False), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
|
bsd-3-clause
|
fyffyt/scikit-learn
|
examples/plot_johnson_lindenstrauss_bound.py
|
127
|
7477
|
r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by the
fact that `p` defines an eps-embedding with good probability, as
defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
   n\_components \geq \frac{4 \log(n\_samples)}{eps^2 / 2 - eps^3 / 3}
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
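
As a quick sanity check (a minimal sketch, not part of the original example;
it assumes scikit-learn is importable), the bound above can be evaluated
directly with ``sklearn.random_projection.johnson_lindenstrauss_min_dim``::

    from sklearn.random_projection import johnson_lindenstrauss_min_dim

    # 1e6 samples embedded with at most 50% distortion need about
    # 4 * log(1e6) / (0.5 ** 2 / 2 - 0.5 ** 3 / 3) ~= 663 dimensions
    johnson_lindenstrauss_min_dim(n_samples=1e6, eps=0.5)
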
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text documents (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray-level pixel data of 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
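
For instance (assuming the example is run as a standalone script from the
``examples/`` directory)::

    python plot_johnson_lindenstrauss_bound.py                      # digits (default)
    python plot_johnson_lindenstrauss_bound.py --twenty-newsgroups  # 20 newsgroups
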
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observations) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observations) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images, which are
# quite low dimensional and dense, or of documents of the 20 newsgroups
# dataset, which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
    # Needs an internet connection, hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical sample pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
|
bsd-3-clause
|
agrav/freesif
|
examples/plot_motion_raos.py
|
1
|
1337
|
# -*- coding: utf-8 -*-
"""Plot motion RAOs from G1.SIF file
"""
import freesif as fs
import matplotlib.pyplot as plt
# convert data into HDF5 format
#fs.sif2hdf5('../test_files/slowdrift_G1.SIF')
# open the hdf5 file (returns a File object)
#f = fs.open_hdf5('G1.h5')
# access the HydroData object via the dict interface
#d = f['G1']
# alternative shorthand method to do the above 3 steps at once:
d = fs.open_sif('../tests/files/hydro/slowdrift_G1.SIF')
# get motion data
dirs = d.get_directions('degrees')
periods = d.get_periods()
motions = d.get_motion_raos()
# plot data
resp_names = ['Surge', 'Sway', 'Heave', 'Roll', 'Pitch', 'Yaw']
markers = ['.-','o-','v-','^-','<-','>-','1-','2-','3-','4-','8-','s-',
'p-','*-','h-','H-','+-','x-','D-','d-','|-','_-']
fig, axes = plt.subplots(3,2, figsize=(10,12), sharex=False)
for i, ax in enumerate(axes.flat):
for j in range(len(dirs)):
ax.plot(periods, abs(motions[i,j,:]), markers[j])
ax.set_title(resp_names[i])
ax.grid()
ax.set_xlabel('[s]')
ax.set_ylabel('[m/m]' if i<3 else 'rad/m')
leg = fig.legend(ax.lines, dirs, loc = 'lower center', ncol=7)
leg.set_title('Wave directions [deg]')
fig.tight_layout(rect=(0.0,0.08,1,.95))
fig.suptitle('Motion RAOs', size=20)
#fig.savefig('motions.png', dpi=200)
plt.show()
# close data
d.close()
|
mit
|
frederick623/HTI
|
cash_pb_report/bin/cash_pb_report.py
|
2
|
23899
|
import os
import sys
import re
import glob
import datetime
import numpy as np
import pandas as pd
import django
import multiprocessing
import traceback
from dateutil.rrule import DAILY, rrule, MO, TU, WE, TH, FR
from functools import reduce, partial
from django.conf import settings
from django.template.loader import get_template
from django import template
from weasyprint import HTML, CSS
# CUR_PATH = os.path.dirname(__file__)
CUR_PATH = r"\\p7fs0003\nd\3033-Horizon-FA-Share\Cash_PB_Reports"
CONST_DIC = {
"template_dirs": [os.path.join(CUR_PATH, "template")],
"cur_dt": "",
"cln_detail_path": os.path.join(r"\\p7fs0003\nd\3033-Horizon-FA-Share\Cash_PB_Reports\mss_pb_reports", "MSSE Daily Balance (PB) ????????.xls"),
"client_report_file": os.path.join(CUR_PATH, "config/pb_client_account.xlsx"),
"daily_data_path": os.path.join(CUR_PATH, "data/daily_????????.xlsx"),
"td_pos_rpt_template": os.path.join(CUR_PATH, r"template\TD Position Details Report Template.html"),
"td_pos_rpt_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\TD Position Details Report.pdf"),
"sd_pos_rpt_template": os.path.join(CUR_PATH, r"template\SD Position Details Report Template.html"),
"sd_pos_rpt_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\SD Position Details Report.pdf"),
"cash_proj_rpt_template": os.path.join(CUR_PATH, r"template\Cash Projection Summary Template.html"),
"cash_proj_rpt_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\Cash Projection Summary.pdf"),
"margin_rpt_template": os.path.join(CUR_PATH, r"template\Margin Requirement Template.html"),
"margin_rpt_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\Margin Requirement.pdf"),
"acc_summ_template": os.path.join(CUR_PATH, r"template\Account Summary Template.html"),
"acc_summ_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\Account Summary.pdf"),
"sbl_stk_template": os.path.join(CUR_PATH, r"template\SBL Stock Template.html"),
"sbl_stk_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\SBL Stock.pdf"),
"sbl_coll_template": os.path.join(CUR_PATH, r"template\SBL Collateral Template.html"),
"sbl_coll_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\SBL Collateral.pdf"),
"mtd_accrued_template": os.path.join(CUR_PATH, r"template\MTD Accrued Template.html"),
"mtd_accrued_output": os.path.join(CUR_PATH,r"output\[client]\[YYYYMMDD]\MTD Accrued.pdf"),
}
ins_type_dict = {
"L": "Equity Long",
"S": "Equity Short",
"X": "Equity Borrow Excess",
"B": "Fixed Income",
"O": "Stock Options",
"F": "Index Futures/Options",
}
ins_type_order = {
"L": "1L",
"S": "2S",
"X": "3X",
"B": "4B",
"O": "5O",
"F": "6F",
}
company_dict = {
"L": "Cash Market",
"S": "Cash Market",
"X": "Cash Market",
"B": "Cash Market",
"O": "Cash Market",
"F": "Futures & Options Market",
}
cash_type_order = {
"Opening": "1",
"Movement": "2",
"Projected": "3",
}
rate_div_dict = {
"HKD": 365,
"USD": 360,
}
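# Number formatting helpers for the report templates: negatives are rendered in
# parentheses, positives with thousands separators, exact zeros as '-'; the
# MARGIN variant additionally renders any other value (e.g. NaN) as "n.a.".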
MARGIN_BRACKET_DECIMAL_FORMATTER = lambda x: '(%s)' % '{0:,.2f}'.format(abs(x)) if x < 0 else ('{0:,.2f}'.format(x) if x > 0 else '-' if x == 0 else "n.a.")
BRACKET_DECIMAL_FORMATTER = lambda x: '(%s)' % '{0:,.2f}'.format(abs(x)) if x < 0 else ('{0:,.2f}'.format(x) if x > 0 else '-')
BRACKET_FOUR_DECIMAL_FORMATTER = lambda x: '(%s)' % '{0:,.4f}'.format(abs(x)) if x < 0 else ('{0:,.4f}'.format(x) if x > 0 else '-')
BRACKET_FORMATTER = lambda x: '(%s)' % '{0:,}'.format(abs(x)) if x < 0 else ('{0:,}'.format(x) if x > 0 else '-')
PERCENTAGE_FORMATTER = lambda x : "{:.0%}".format(x)
PERCENTAGE_DECIMAL_FORMATTER = lambda x : "{:.2%}".format(x)
is_configured = []
def timer(func):
def wrapper(*args, **kwargs):
t1 = datetime.datetime.now()
func(*args, **kwargs)
t2 = datetime.datetime.now()
print( "Function %s Time it took to run the function: %s" % (func.__name__, str(t2 - t1)) )
return wrapper
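# Aggregation helpers referenced from the report specs below: group_by_sum
# renders the (rounded) column total with the bracket formatter, while
# margin_calc reports abs(sum(CollMv) / sum(MarketExposure)) as a percentage.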
def group_by_sum(df, k):
return BRACKET_DECIMAL_FORMATTER(df[k].apply(lambda x: round(x, 2)).sum())
def margin_calc(df, k):
return PERCENTAGE_DECIMAL_FORMATTER(abs(df["CollMv"].apply(lambda x: round(x, 2)).sum()/df["MarketExposure"].apply(lambda x: round(x, 2)).sum()))
def django_configure():
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": CONST_DIC["template_dirs"],
}
]
INSTALLED_APPS = ("django.contrib.humanize",)
settings.configure(TEMPLATES=TEMPLATES)
# settings.INSTALLED_APPS += INSTALLED_APPS
django.setup()
return
def template_to_html(template_file, output_file, context_dict):
pid = os.getpid()
if pid not in is_configured:
django_configure()
is_configured.append(pid)
template = get_template(template_file)
return template.render(context_dict)
def render_to_pdf(template_file, output_file, context_dict):
try:
# print (output_file)
html = template_to_html(template_file, output_file, context_dict)
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# debug_html = open(output_file.replace("pdf", "html"), "w")
# debug_html.write(html)
# debug_html.close()
HTML(string=html).write_pdf(output_file)
return output_file
except:
return traceback.format_exc()
def import_client():
input_file = max(glob.iglob(CONST_DIC["cln_detail_path"]))
if CONST_DIC["cur_dt"] == "":
CONST_DIC["cur_dt"] = datetime.datetime.strptime(input_file[-12:-4], "%Y%m%d")
input_file = CONST_DIC["client_report_file"]
print (input_file)
df = pd.read_excel(input_file)
df.columns = [re.sub(r"[\*\.#/\$%\"\(\)& :]", "", c) for c in df.columns]
df = df.fillna("")
return df
def import_daily_data():
consol_xlsx = CONST_DIC["daily_data_path"].replace("????????", CONST_DIC["cur_dt"].strftime("%Y%m%d"))
margin_df = pd.read_excel(consol_xlsx, "margin")
cash_df = pd.read_excel(consol_xlsx, "cash")
eqt_ls_mv_df = pd.read_excel(consol_xlsx, "eqt_mv")
fut_exp_df = pd.read_excel(consol_xlsx, "deriv_exp")
eqt_shrt_coll_df = pd.read_excel(consol_xlsx, "eqt_shrt_coll")
unsetl_trd_df = pd.read_excel(consol_xlsx, "unsetl_trd")
sbl_inv_df = pd.read_excel(consol_xlsx, "sbl_inv")
g1_fx_df = pd.read_excel(consol_xlsx, "g1_fx")
return margin_df, cash_df, eqt_ls_mv_df, fut_exp_df, eqt_shrt_coll_df, unsetl_trd_df, sbl_inv_df, g1_fx_df
def df_to_dict(df, col_arr):
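    """Recursively convert ``df`` into a list of nested dicts driven by the
    ``col_arr`` spec: the leading dict maps grouping columns to an
    {output column: formatter} mapping, nested lists describe sub-levels, and
    when the grouping columns are absent the formatters are applied to the
    whole frame as aggregation callables (e.g. ``group_by_sum``)."""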
arr = []
lv_cols = list(col_arr[0].values())[0]
row_filter = list(col_arr[0].keys())
nxlv_list = list(filter(lambda s: not isinstance(s, dict), col_arr))
if (pd.Series(row_filter).isin(df.columns).all()):
for idx, row in df[row_filter].drop_duplicates().iterrows():
lv_df = pd.merge(df, row.to_frame().transpose(), on=row_filter, how="inner")
dic = { k: (tmp[0] if lv_cols[k] == "" else lv_cols[k](tmp[0]) ) for k, tmp in (lv_df[list(lv_cols.keys())].drop_duplicates().to_dict(orient="list")).items()}
for nx_lv in nxlv_list:
nx_lv_cols = list(nx_lv[0].keys())
if list(set(nx_lv_cols).intersection(lv_df.columns)) == nx_lv_cols:
lv_df = lv_df.sort_values(nx_lv_cols, ascending=True)
dic[nx_lv_cols[0]] = df_to_dict(lv_df, nx_lv)
arr.append(dic)
else:
dic = {}
for k, func in lv_cols.items():
if callable(func):
dic[k] = func(df, k)
else:
dic[k] = func
arr.append(dic)
return arr
def sd_pos_rpt(client_df, pos_df, fut_exp_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "BaseCCY":"", "ClientId":""} },
[{"InTypeCCY":{"InstrumentType":"", "Currency":"", "FxRate":""} },
[{"LSOrder":{"LS":""} },
[{"InstrumentCode":{"InstrumentCode":"", "Market":"", "InstrumentName":"", "MarginRatio":PERCENTAGE_FORMATTER, "SdQty":BRACKET_FORMATTER, "MktPrice":"", "SdMarketValue":BRACKET_DECIMAL_FORMATTER, "SdBaseValue":BRACKET_DECIMAL_FORMATTER } }],
[{"SumInstrumentCode":{"SdMarketValue": group_by_sum, "SdBaseValue": group_by_sum} }]
]
]
]
pos_df = pos_df.loc[pos_df.SdQty != 0]
pos_df.loc[pos_df.SdQty < 0, "MarginRatio"] = 0
pos_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
pos_df["SdBaseValue"] = pos_df["SdMarketValue"] * pos_df["FxRate"]
pos_df["SdBaseValue"] = pos_df["SdBaseValue"].apply(lambda x: round(x, 2))
pos_df["InTypeCCY"] = pos_df["InstrumentType"] + "|" + pos_df["Currency"]
pos_df["LS"] = np.where(pos_df["AccountType"] == "X", "Equity Borrow Excess", np.where(pos_df["SdQty"] >= 0, "Long", "Short"))
pos_df["LSOrder"] = pos_df["LS"].map({"Long":1, "Short":2, "Equity Borrow Excess":3})
pos_df = pos_df.sort_values(by=['Client', 'Currency', "LS", 'InstrumentType', 'InstrumentCode'], ascending=[True, True, True, True, True])
pos_df = pd.merge(pos_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client","BaseCCY":"BaseCCY","ClientId":"ClientId"}), on=["Client"], how="right")
sd_dic = df_to_dict(pos_df, conf_dic)
return sd_dic
def td_pos_rpt(client_df, pos_df, fut_exp_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "BaseCCY":"", "ClientId":""} },
[{"InTypeCCY":{"InstrumentType":"", "Currency":"", "FxRate":""} },
[{"LSOrder":{"LS":""} },
[{"InstrumentCode":{"InstrumentCode":"", "Market":"", "InstrumentName":"", "MarginRatio":PERCENTAGE_FORMATTER, "TdQty":BRACKET_FORMATTER, "MktPrice":"", "TdMarketValue":BRACKET_DECIMAL_FORMATTER, "TdBaseValue":BRACKET_DECIMAL_FORMATTER } }],
[{"SumInstrumentCode":{"TdMarketValue": group_by_sum, "TdBaseValue": group_by_sum} }]
]
]
]
pos_df = pos_df.loc[pos_df.TdQty != 0]
pos_df.loc[pos_df.TdQty < 0, "MarginRatio"] = 0
pos_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
pos_df["TdBaseValue"] = pos_df["TdMarketValue"] * pos_df["FxRate"]
pos_df["TdBaseValue"] = pos_df["TdBaseValue"].apply(lambda x: round(x, 2))
pos_df["InTypeCCY"] = pos_df["InstrumentType"] + "|" + pos_df["Currency"]
pos_df["LS"] = np.where(pos_df["AccountType"] == "X", "Equity Borrow Excess", np.where(pos_df["TdQty"] >= 0, "Long", "Short"))
pos_df["LSOrder"] = pos_df["LS"].map({"Long":1, "Short":2, "Equity Borrow Excess":3})
pos_df = pos_df.sort_values(by=['Client', 'Currency', "LS", 'InstrumentType', 'InstrumentCode'], ascending=[True, True, True, True, True])
pos_df = pd.merge(pos_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client","BaseCCY":"BaseCCY","ClientId":"ClientId"}), on=["Client"], how="right")
td_dic = df_to_dict(pos_df, conf_dic)
return td_dic
def cash_proj_rpt(client_df, cash_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "BaseCCY":"", "ClientId":"", "date_dp0":"", "date_dp1":"", "date_dp2":"", "date_dp3":""} },
[{"Currency":{"Currency":"", "FxRate":""} },
[{"CashtypeOrder": {"Cashtype":""} },
[{"SetlDt": {"local_d":BRACKET_DECIMAL_FORMATTER,"base_d":BRACKET_DECIMAL_FORMATTER} }]
]
]
]
weekday_arr = list( rrule(DAILY, dtstart=CONST_DIC["cur_dt"], count=4, byweekday=(MO,TU,WE,TH,FR)) )
day_header = list(map(lambda x: x.strftime("%d-%b-%Y"), weekday_arr))
proj_df = cash_df.groupby(["Client", "Currency", "FxRate", "SetlDt"]).sum().transpose().stack([0,1,1,0]).reset_index().rename(columns={"level_0": "Cashtype", 0: "local_d"})
proj_df["SetlDt"] = proj_df["SetlDt"].astype("int")
proj_df.loc[proj_df.SetlDt == 99, "SetlDt"] = -1
proj_df.loc[proj_df.Cashtype == "Projected", "Cashtype"] = "Projected Closing"
proj_df = pd.merge(proj_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client"}), on=["Client"], how="inner")
proj_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
proj_df["base_d"] = proj_df["local_d"] * proj_df["FxRate"]
proj_df["CashtypeOrder"] = proj_df["Cashtype"].map(cash_type_order)
for idx, weekday in enumerate(day_header):
proj_df["date_dp" + str(idx)] = weekday
proj_dic = df_to_dict(proj_df, conf_dic)
return proj_dic
def margin_req_rpt(client_df, margin_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "BaseCCY":"", "ClientId":""} },
[{"Company": {"Company":""} },
[{"Currency":{"Currency":"", "FxRate":""} },
[{"AccountOrder": {"InstrumentType":"", "SettledCash":MARGIN_BRACKET_DECIMAL_FORMATTER, "PendingCash":MARGIN_BRACKET_DECIMAL_FORMATTER, "Accrued":MARGIN_BRACKET_DECIMAL_FORMATTER,
"TDMV":MARGIN_BRACKET_DECIMAL_FORMATTER, "SDMV":MARGIN_BRACKET_DECIMAL_FORMATTER, "Margin":MARGIN_BRACKET_DECIMAL_FORMATTER, "Collateral":MARGIN_BRACKET_DECIMAL_FORMATTER,
"AvaBal":MARGIN_BRACKET_DECIMAL_FORMATTER, "AvaBalBase":MARGIN_BRACKET_DECIMAL_FORMATTER, } },
]
],
[{"SumBase":{ "AvaBalBase": group_by_sum} }]
]
]
# print (margin_df)
margin_df["InstrumentType"] = margin_df["AccountType"].map(ins_type_dict)
margin_df["AccountOrder"] = margin_df["AccountType"].map(ins_type_order)
margin_df["PendingCash"] = margin_df["PendingCash"].astype('float64')
margin_df = pd.merge(margin_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client","BaseCCY":"BaseCCY","ClientId":"ClientId"}), on=["Client"], how="inner")
margin_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
margin_df["AvaBal"] = 0
margin_df.loc[(margin_df.AccountType == "L") | (margin_df.AccountType == "B"),"AvaBal"] = margin_df["Collateral"] + margin_df["SettledCash"] + margin_df["PendingCash"] + margin_df["Accrued"]
margin_df.loc[margin_df.AccountType == "S","AvaBal"] = margin_df["Collateral"] + margin_df["Margin"] + margin_df["SettledCash"]
margin_df.loc[margin_df.AccountType == "F","AvaBal"] = margin_df["Collateral"] + margin_df["Margin"]
margin_df["AvaBalBase"] = margin_df["AvaBal"] * margin_df["FxRate"]
margin_df["Company"] = margin_df["AccountType"].map(company_dict)
margin_dic = df_to_dict(margin_df, conf_dic)
return margin_dic
def acc_summ_rpt(client_df, margin_df, cash_df):
acc_summ_dic = {}
conf_dic = [{"Client": {"Client":"","businessDate":"", "BaseCCY":"", "ClientId":"", "date_dp0":"", "date_dp1":"", "date_dp2":"", "date_dp3":""} },
[{"CashtypeOrder": {"Cashtype":""} },
[{"SetlDt": {"base_d":BRACKET_DECIMAL_FORMATTER} }]
],
[{"InstrumentType": {"InstrumentType":"", "BaseMV":BRACKET_DECIMAL_FORMATTER} }]
]
weekday_arr = list( rrule(DAILY, dtstart=CONST_DIC["cur_dt"], count=4, byweekday=(MO,TU,WE,TH,FR)) )
day_header = list(map(lambda x: x.strftime("%d-%b-%Y"), weekday_arr))
cash_df["Opening"] = cash_df["Opening"] * cash_df["FxRate"]
cash_df["Movement"] = cash_df["Movement"] * cash_df["FxRate"]
cash_df["Projected"] = cash_df["Projected"] * cash_df["FxRate"]
base_cash_df = cash_df.groupby(["Client","SetlDt"], as_index=False).agg({"Opening":"sum", "Movement":"sum", "Projected":"sum"})
proj_df = base_cash_df.groupby(["Client", "SetlDt"]).sum().transpose().stack([0,0]).reset_index().rename(columns={"level_0": "Cashtype", 0: "base_d"})
proj_df = proj_df.drop(proj_df[proj_df.Cashtype == "Movement"].index)
proj_df.loc[proj_df.Cashtype == "Projected", "Cashtype"] = "Projected Closing"
proj_df = pd.merge(proj_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client"}), on=["Client"], how="inner")
proj_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
for idx, weekday in enumerate(day_header):
proj_df["date_dp" + str(idx)] = weekday
margin_df["InstrumentType"] = margin_df["AccountType"].map(ins_type_dict)
margin_df["BaseMV"] = margin_df["SDMV"] * margin_df["FxRate"]
asset_df = margin_df.groupby(["Client","InstrumentType"], as_index=False).agg({"BaseMV": "sum"})
proj_df["CashtypeOrder"] = proj_df["Cashtype"].map(cash_type_order)
proj_df = pd.merge(proj_df, asset_df, on=["Client"], how="inner")
proj_dic = df_to_dict(proj_df, conf_dic)
return proj_dic
def sbl_inv_rpt(client_df, cash_df, sbl_inv_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "ClientId":""} },
[{"LOAN_TYPE":{"LOAN_TYPE":""} },
[{"BGNREF":{"CCY":"", "TRD_DT":"", "SSET_DT":"", "InsCode":"", "ric":"", "qty":BRACKET_FORMATTER, "COLLRATE":PERCENTAGE_DECIMAL_FORMATTER, "status":"", "LNCCY":"",
"FxRate":BRACKET_FOUR_DECIMAL_FORMATTER, "LOANVALUE":BRACKET_DECIMAL_FORMATTER, "MIN_FEE":BRACKET_DECIMAL_FORMATTER, "dailyfee":BRACKET_DECIMAL_FORMATTER } }],
[{"InsSum":{"dailyfee": group_by_sum} }]
]
]
sbl_inv_df = pd.merge(cash_df[["Currency", "FxRate"]].drop_duplicates().rename(columns={"Currency":"CCY"}), sbl_inv_df, on=["CCY"], how="inner")
sbl_inv_df = pd.merge(client_df.loc[client_df.AccountType == "S"].rename(columns={"FAName":"Client"}), sbl_inv_df.rename(columns={"CPTY":"Client"}), on=["Client"], how="inner")
sbl_inv_df["LOANVALUE"] = abs(sbl_inv_df["LOANVALUE"])
sbl_inv_df["LOAN_TYPE"] = sbl_inv_df["LOAN_TYPE"].map({"OS":"Outstanding","PD":"Pending"})
sbl_inv_df["InsCode"] = sbl_inv_df["STOCK"].map(str) + " " + sbl_inv_df["CCY"].map(lambda x: str(x)[0:2]) + " Equity"
sbl_inv_df["ric"] = sbl_inv_df["STOCK"].map(str) + "." + sbl_inv_df["CCY"].map(lambda x: str(x)[0:2])
sbl_inv_df["qty"] = abs(sbl_inv_df["QTY"] )
sbl_inv_df["status"] = np.where((sbl_inv_df["LOAN_TYPE"] == "Pending"), np.where((sbl_inv_df["QTY"] > 0), "RETURN BORROW", "BORROW"), "BORROW")
sbl_inv_df["dailyfee"] = sbl_inv_df["LOANVALUE"]*sbl_inv_df["COLLRATE"]/sbl_inv_df["LNCCY"].map(rate_div_dict)*sbl_inv_df["FxRate"]
sbl_inv_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
sbl_inv_dic = df_to_dict(sbl_inv_df, conf_dic)
return sbl_inv_dic
def sbl_coll_rpt(client_df, eqt_shrt_coll_df, g1_fx_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "ClientId":""} },
[{"Currency":{"Currency":""} },
[{"Transaction":{"Transaction":"", "Margin":BRACKET_DECIMAL_FORMATTER, "MarketExposure":BRACKET_DECIMAL_FORMATTER, "CollMv":BRACKET_DECIMAL_FORMATTER,
"MarginExposure":BRACKET_DECIMAL_FORMATTER, "FxRate":BRACKET_FOUR_DECIMAL_FORMATTER, "baseMargin":BRACKET_DECIMAL_FORMATTER } }],
[{"TransactionSum":{"MarketExposure":group_by_sum, "CollMv":group_by_sum, "MarginExposure":group_by_sum,
"MARGIN":margin_calc, "baseMargin":group_by_sum} }]
]
]
eqt_shrt_coll_df["MarketExposure"] = eqt_shrt_coll_df["MarketExposure"].apply(lambda x: str(x).replace(',','')).astype("float64")
eqt_shrt_coll_df["MarginExposure"] = eqt_shrt_coll_df["MarginExposure"].apply(lambda x: str(x).replace(',','')).astype("float64")
eqt_shrt_coll_df["CollMv"] = np.where((eqt_shrt_coll_df["Transaction"] == "Loan") | (eqt_shrt_coll_df["Transaction"] == "Borrow"), 0, eqt_shrt_coll_df["MarketExposure"])
eqt_shrt_coll_df["MarketExposure"] = np.where((eqt_shrt_coll_df["Transaction"] == "Loan") | (eqt_shrt_coll_df["Transaction"] == "Borrow"), eqt_shrt_coll_df["MarketExposure"], 0)
eqt_shrt_coll_df = pd.merge(eqt_shrt_coll_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client","BaseCCY":"BaseCCY","ClientId":"ClientId"}), on=["Client"], how="inner")
eqt_shrt_coll_df = pd.merge(eqt_shrt_coll_df, g1_fx_df.rename(columns={"CURRENCY":"Currency","EXCHANGE_RATE":"ExRate"}), on=["Currency"], how="inner")
eqt_shrt_coll_df = pd.merge(eqt_shrt_coll_df, g1_fx_df.rename(columns={"CURRENCY":"BaseCCY","EXCHANGE_RATE":"BaseRate"}), on=["BaseCCY"], how="inner")
eqt_shrt_coll_df["FxRate"] = eqt_shrt_coll_df["BaseRate"].astype("float64") / eqt_shrt_coll_df["ExRate"].astype("float64")
eqt_shrt_coll_df["baseMargin"] = eqt_shrt_coll_df["MarginExposure"] * eqt_shrt_coll_df["FxRate"]
eqt_shrt_coll_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
sbl_coll_dic = df_to_dict(eqt_shrt_coll_df, conf_dic)
return sbl_coll_dic
def accrued_rpt(client_df, margin_df):
conf_dic = [{"Client": {"Client":"","businessDate":"", "ClientId":"", "InterestRebate":"", "IsLeverage":""} },
[{"Currency":{"Currency":""} },
[{"dt":{"dt":"", "ccy":"", "SettledCash":BRACKET_FORMATTER, "day":"", "accrued":BRACKET_DECIMAL_FORMATTER } }],
[{"sumdt":{"sumccy":"", "accrued":group_by_sum}}]
]
]
margin_df = pd.merge(margin_df, client_df[["FAName", "BaseCCY", "ClientId"]].drop_duplicates().rename(columns={"FAName":"Client","BaseCCY":"BaseCCY","ClientId":"ClientId"}), on=["Client"], how="inner")
accrued_df = margin_df.groupby(["Client", "ClientId", "Currency"], as_index=False).agg({"Accrued":"sum", "SettledCash":"sum"})
accrued_df["dt"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
accrued_df["ccy"] = accrued_df["Currency"]
accrued_df["sumccy"] = accrued_df["Currency"]
accrued_df["day"] = accrued_df["ccy"].map(rate_div_dict)
accrued_df["businessDate"] = CONST_DIC["cur_dt"].strftime("%d-%b-%Y")
accrued_df["InterestRebate"] = np.where(accrued_df["Accrued"].astype("float") < 0, "Interest", "Rebate")
accrued_df["IsLeverage"] = np.where(accrued_df["Accrued"].astype("float") < 0, "Leverage", "")
accrued_df["accrued"] = accrued_df["Accrued"].map(abs)
accrued_dic = df_to_dict(accrued_df, conf_dic)
return accrued_dic
@timer
def main():
client_df = import_client()
margin_df, cash_df, eqt_ls_mv_df, fut_exp_df, eqt_shrt_coll_df, unsetl_trd_df, sbl_inv_df, g1_fx_df = import_daily_data()
pool = multiprocessing.Pool( multiprocessing.cpu_count() )
tasks = []
sd_dic = sd_pos_rpt(client_df, eqt_ls_mv_df, fut_exp_df)
td_dic = td_pos_rpt(client_df, eqt_ls_mv_df, fut_exp_df)
proj_dic = cash_proj_rpt(client_df, cash_df)
margin_dic = margin_req_rpt(client_df, margin_df)
acc_summ_dic = acc_summ_rpt(client_df, margin_df, cash_df)
sbl_inv_dic = sbl_inv_rpt(client_df, cash_df, sbl_inv_df)
sbl_coll_dic = sbl_coll_rpt(client_df, eqt_shrt_coll_df, g1_fx_df)
accrued_dic = accrued_rpt(client_df, margin_df)
for cln_dic in sd_dic:
tasks.append((CONST_DIC["sd_pos_rpt_template"], CONST_DIC["sd_pos_rpt_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in td_dic:
tasks.append((CONST_DIC["td_pos_rpt_template"], CONST_DIC["td_pos_rpt_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in proj_dic:
tasks.append((CONST_DIC["cash_proj_rpt_template"], CONST_DIC["cash_proj_rpt_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in margin_dic:
tasks.append((CONST_DIC["margin_rpt_template"], CONST_DIC["margin_rpt_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in acc_summ_dic:
tasks.append((CONST_DIC["acc_summ_template"], CONST_DIC["acc_summ_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in sbl_inv_dic:
tasks.append((CONST_DIC["sbl_stk_template"], CONST_DIC["sbl_stk_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in sbl_coll_dic:
tasks.append((CONST_DIC["sbl_coll_template"], CONST_DIC["sbl_coll_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
for cln_dic in accrued_dic:
tasks.append((CONST_DIC["mtd_accrued_template"], CONST_DIC["mtd_accrued_output"].replace("[client]", cln_dic["Client"]).replace("[YYYYMMDD]", CONST_DIC["cur_dt"].strftime("%Y%m%d")), cln_dic))
results = [pool.apply_async(render_to_pdf, t) for t in tasks ]
for result in results:
print (result.get())
return
if __name__ == "__main__":
print ("PB Cash Report")
try:
# for dt in list( rrule(DAILY, dtstart=datetime.datetime.strptime("20180919", "%Y%m%d"), until=(datetime.datetime.now()+datetime.timedelta(days=-1)), byweekday=(MO,TU,WE,TH,FR)) ):
# CONST_DIC["cur_dt"] = dt
if True:
main()
except KeyboardInterrupt:
print ("Ctrl+C pressed. Stopping...")
|
apache-2.0
|
mne-tools/mne-python
|
tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py
|
10
|
5666
|
"""
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- computing single trial power estimates
- baseline-correcting the power estimates (power ratios)
- computing stats to see if the ratio deviates from 1.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax, event_id = -0.3, 0.6, 1
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# just use right temporal sensors for speed
epochs.pick_channels(mne.read_vectorview_selection('Right-temporal'))
evoked = epochs.average()
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
freqs = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
tfr_epochs = tfr_morlet(epochs, freqs, n_cycles=4., decim=decim,
average=False, return_itc=False, n_jobs=1)
# Baseline power
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))
# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(-0.1, 0.4)
tfr_epochs.crop(-0.1, 0.4)
epochs_power = tfr_epochs.data
###############################################################################
# Define adjacency for statistics
# -------------------------------
# To compute a cluster-corrected value, we need a suitable definition
# for the adjacency/neighborhood of our values. So we first compute the
# sensor adjacency, then combine that with a grid/lattice adjacency
# assumption for the time-frequency plane:
sensor_adjacency, ch_names = mne.channels.find_ch_adjacency(
tfr_epochs.info, 'grad')
# Subselect the channels we are actually using
use_idx = [ch_names.index(ch_name.replace(' ', ''))
for ch_name in tfr_epochs.ch_names]
sensor_adjacency = sensor_adjacency[use_idx][:, use_idx]
assert sensor_adjacency.shape == \
(len(tfr_epochs.ch_names), len(tfr_epochs.ch_names))
assert epochs_power.data.shape == (
len(epochs), len(tfr_epochs.ch_names),
len(tfr_epochs.freqs), len(tfr_epochs.times))
adjacency = mne.stats.combine_adjacency(
sensor_adjacency, len(tfr_epochs.freqs), len(tfr_epochs.times))
# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
len(tfr_epochs.ch_names) * len(tfr_epochs.freqs) * len(tfr_epochs.times)
###############################################################################
# Compute statistic
# -----------------
threshold = 3.
n_permutations = 50 # Warning: 50 is way too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
threshold=threshold, tail=0,
adjacency=adjacency,
out_type='mask', verbose=True)
###############################################################################
# View time-frequency plots
# -------------------------
evoked_data = evoked.data
times = 1e3 * evoked.times
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
# Just plot one channel's data
ch_idx, f_idx, t_idx = np.unravel_index(
np.nanargmax(np.abs(T_obs_plot)), epochs_power.shape[1:])
# ch_idx = tfr_epochs.ch_names.index('MEG 1332') # to show a specific one
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.subplot(2, 1, 1)
plt.imshow(T_obs[ch_idx], cmap=plt.cm.gray,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot[ch_idx], cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(f'Induced power ({tfr_epochs.ch_names[ch_idx]})')
ax2 = plt.subplot(2, 1, 2)
evoked.plot(axes=[ax2], time_unit='s')
plt.show()
|
bsd-3-clause
|
asieriko/python-educa
|
notakebtest.py
|
1
|
11753
|
import pandas as pd
import numpy as np
import unittest
from pandas.util.testing import assert_frame_equal
import notakeb
class KnownValues(unittest.TestCase):
def test_notPassedStats(self):
n = notakeb.notak("","")
s = pd.Series([0,0,8,0,0,2,0,2,2,0,3,1,1,1,0,8,8,3,1,1,0,2,2,5,5,0,0,4,6,3], index=["aabadiacaj","aalvardiaz","acarvalper","agarciacha","ajimenemen2","aruedasbal","ausandilor","cmoralegil","dlopezarra","dmoreirpad","etamarerod","gmorenogob","gperezgarc","icabodeder","iiribarjul","imatillbla","jarminogal","jcarazoval","jcorrei1","jotazubosq","myaguancab","nurrutipla","pgayarrarr1","psamperdel","psantamlar1","rsaizherre","tdiallo","vcunhasori","wameziaaho","zcastrogar"])
s.name="grade"
s.index.name="uniquename"
result = n.notPassedStats(s)
expected = {0: 10, 1: 5, 2: 5, 3: 3, 4: 1, 5: 2, 6: 1, 8: 3}
self.assertDictEqual(result,expected)
def test_generatePiedata(self):
n = notakeb.notak("mendillorriN.db","eu")
n.setWorkDir("1ebaluaketa15-16")
n.getData("2015-2016", ["1. Ebaluazioa"], 1)
n.df = n.df[n.df.year!="2016-2017"]
missed = {0: 10, 1: 5, 2: 5, 3: 3, 4: 1, 5: 2, 6: 1, 7: 0, 8: 3}
result = (n.generatePiedata(missed))
expected = ([20, 4, 6],[10, 5, 5, 0, 0, 0, 0, 0, 0],[0, 0, 0, 3, 1, 0, 0, 0, 0],[0, 0, 0, 0, 0, 2, 1, 0, 3],[0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],['0', '1', '2', '3', '4', '5', '6', '7', '8'])
self.assertEqual(result,expected)
def test_getGroupPromStats(self):
n = notakeb.notak("mendillorriN.db","eu")
n.setWorkDir("1ebaluaketa15-16")
n.getData("2015-2016", ["1. Ebaluazioa"], 1)
n.df = n.df[n.df.year!="2016-2017"]
data = {'cgroup':['1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A'],
'year':['2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016'],
'period':['1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa'],
'uniquename':['aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2'],
'grade':[7,8,6,9,8,8,6,6,7,6,7,8,8,7,7,7,8,7,7,6,3,5,3,4,3,4,4,5,4,4,8,8,8,5,8,7,6,7,7,7,7,7,6,8,8,6,6,9,6,9]}
n.df = pd.DataFrame(data)
result = (n.getGroupPromStats("1º A"))
expected = ([80.0, 0.0, 20.0], 1.6)
self.assertEqual(result,expected)
def test_generateStatsGroup(self):
n = notakeb.notak("mendillorriN.db","eu")
n.getData("2015-2016", ["1. Ebaluazioa"], 1)
data = {'cgroup':['1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A','1º A'],
'year':['2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016','2015-2016'],
'lang':['AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG','AG'],
'period':['1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa','1. Ebaluazioa'],
'subject':['mate','gazte','ing','mus','tek','plas','giz','biogeo','fran','eusk','mate','gazte','ing','mus','tek','plas','giz','biogeo','fran','eusk','mate','gazte','ing','mus','tek','plas','giz','biogeo','fran','eusk','mate','gazte','ing','mus','tek','plas','giz','biogeo','fran','eusk','mate','gazte','ing','mus','tek','plas','giz','biogeo','fran','eusk'],
'uniquename':['aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aabadiacaj','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','aalvardiaz','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','acarvalper','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','agarciacha','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2','ajimenemen2'],
'grade':[3,8,6,9,8,8,6,6,7,6,3,8,8,7,7,7,8,7,7,6,3,5,3,4,3,4,4,5,4,4,3,8,8,5,8,7,6,7,7,7,7,7,6,8,8,6,6,9,6,9]}
n.df = pd.DataFrame(data)
rsubjectsgrouppt,rbadsubjectsgroup,rgroupgrades,rstudentsnotpasses,rpie,rmean,rpercent = (n.generateStatsGroup("2015-2016",n.periods[n.period-1],n.periods,"1º A", False))
data = {'subject':['biogeo','eusk','fran','gazte','giz','ing','mate','mus','plas','tek','All'],
'grade':['100.0','80.0','80.0','100.0','80.0','80.0','20.0','80.0','80.0','80.0','78.0']}
esubjectsgrouppt = pd.DataFrame(data)
esubjectsgrouppt.set_index("subject",inplace=True)
esubjectsgrouppt['grade'] = esubjectsgrouppt.grade.astype(float)
data = {'subject':['mate'],
'%':['20.0']}
ebadsubjectsgroup = pd.DataFrame(data)
ebadsubjectsgroup = ebadsubjectsgroup[["subject","%"]]
ebadsubjectsgroup['%'] = ebadsubjectsgroup['%'].astype(float)
data = {'subject':['biogeo','eusk','fran','gazte','giz','ing','mate','mus','plas','tek','All'],
'grade':['6.8','6.4','6.2','7.2','6.0','6.2','3.8','6.6','6.4','6.8','6.24']}
egroupgrades = pd.DataFrame(data)
egroupgrades = egroupgrades[["subject","grade"]]
egroupgrades['grade'] = egroupgrades.grade.astype(float)
data = {'uniquename':['acarvalper','aabadiacaj','aalvardiaz','agarciacha','ajimenemen2'],
'<5':[8,1,1,1,0],
'avg':['3.9','6.7','6.8','6.6','7.2']}
index = [2,0,1,3,4]
estudentsnotpasses = pd.DataFrame(data,index=index)
estudentsnotpasses['avg'] = estudentsnotpasses.avg.astype(float)
estudentsnotpasses = estudentsnotpasses[["uniquename","<5","avg"]]
epie = '1º A-1. Ebaluazioa-eu.png'
emean = '1º A (2015-2016) 1. Ebaluazioa-mean-eu.png'
epercent = '1º A (2015-2016) 1. Ebaluazioa-percent-eu.png'
expected = ([80.0, 0.0, 20.0], 1.6)
self.assertEqual(rpie,epie)
self.assertEqual(rmean,emean)
self.assertEqual(rpercent,epercent)
assert_frame_equal(rsubjectsgrouppt,esubjectsgrouppt)
assert_frame_equal(rbadsubjectsgroup,ebadsubjectsgroup)
assert_frame_equal(rgroupgrades,egroupgrades)
assert_frame_equal(rstudentsnotpasses,estudentsnotpasses)
def test_generateFinalGrade(self):
n = notakeb.notak("mendillorriN.db","eu")
data = {'year' : pd.Series(["2015-2016", "2015-2016", "2015-2016","2015-2016","2015-2016","2015-2016","2015-2016"]),
'uniquename' : pd.Series(["john","john","john","john","john","michael","michael"]),
'subject' : pd.Series(["Math","Math","Fis","Fis","ICT","Fis","Fis"]),
'period' : pd.Series(["Azken Ebaluazioa","Ohiz kanpoko Ebaluazioa","Azken Ebaluazioa","Ohiz kanpoko Ebaluazioa","Azken Ebaluazioa","Azken Ebaluazioa","Ohiz kanpoko Ebaluazioa"]),
'grade' : pd.Series([2,5,4,6,10,3,8]),}
n.df = pd.DataFrame(data)
n.generateFinalGrade()
data = {'year' : pd.Series(["2015-2016", "2015-2016", "2015-2016","2015-2016","2015-2016","2015-2016","2015-2016","2015-2016","2015-2016","2015-2016","2015-2016"]),
'uniquename' : pd.Series(["john","john","john","john","john","michael","michael","john","john","john","michael"]),
'subject' : pd.Series(["Math","Math","Fis","Fis","ICT","Fis","Fis","Fis","ICT","Math","Fis"]),
'period' : pd.Series(["Azken Ebaluazioa","Ohiz kanpoko Ebaluazioa","Azken Ebaluazioa","Ohiz kanpoko Ebaluazioa","Azken Ebaluazioa","Azken Ebaluazioa","Ohiz kanpoko Ebaluazioa","Final","Final","Final","Final"]),
'grade' : pd.Series([2,5,4,6,10,3,8,6,10,5,8]),}
dfr = pd.DataFrame(data)
assert_frame_equal(n.df,dfr)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
dnouri/nolearn
|
setup.py
|
1
|
1695
|
import os
import codecs
from setuptools import setup, find_packages
version = '0.6.2.dev0'
here = os.path.abspath(os.path.dirname(__file__))
try:
README = codecs.open(os.path.join(here, 'README.rst'),
encoding='utf-8').read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
except IOError:
README = CHANGES = ''
install_requires = [
'joblib',
'scikit-learn',
'tabulate',
'Lasagne',
'Theano',
]
visualization_require = [
'matplotlib<3.0.999',
'pydotplus',
'ipython<5.999'
]
tests_require = [
'mock',
'pytest',
'pytest-cov',
'pytest-flakes',
'pytest-pep8',
]
docs_require = [
'Sphinx<1.999',
]
gdbn_require = [
"gdbn"
]
all_require = (visualization_require + tests_require + docs_require + gdbn_require)
setup(name='nolearn',
version=version,
description="scikit-learn compatible neural network library",
long_description='\n\n'.join([README, CHANGES]),
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
],
keywords='',
author='Daniel Nouri',
author_email='[email protected]',
url='https://github.com/dnouri/nolearn',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require={
'visualization': visualization_require,
'testing': tests_require,
'docs': docs_require,
'gdbn': gdbn_require,
'all': all_require,
},
)
|
mit
|
zihua/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
22
|
56780
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC, because the
    # precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here, if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='binary')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_precision_recall_f_unused_pos_label():
# Check warning that pos_label unused when set to non-default value
# but average != 'binary'; even if data is binary.
assert_warns_message(UserWarning,
"Note that pos_label (set to 2) is "
"ignored when average != 'binary' (got 'macro'). You "
"may use labels=[pos_label] to specify a single "
"positive class.", precision_recall_fscore_support,
[1, 2, 1], [1, 2, 2], pos_label=2, average='macro')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
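        # (true_mcc is the Matthews correlation coefficient computed by hand
        #  from the flattened confusion-matrix cells)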
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
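    # (linear weights penalize a disagreement between categories i and j by
    #  |i - j|, quadratic weights by (i - j) ** 2)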
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
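    # (for binary 0/1 labels the MCC equals the Pearson correlation
    #  coefficient of the two label vectors, which is what np.corrcoef gives)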
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"]) * -1
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true,
rng.randint(-100, 100) * np.ones(20, dtype=int))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude
mask = [1] * 10 + [0] * 10
    # Now only the first half of the vector elements is given a weight of 1
    # and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_binary_averaged():
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
average=None)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='macro')
assert_equal(p, np.mean(ps))
assert_equal(r, np.mean(rs))
assert_equal(f, np.mean(fs))
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='weighted')
support = np.bincount(y_true)
assert_equal(p, np.average(ps, weights=support))
assert_equal(r, np.average(rs, weights=support))
assert_equal(f, np.average(fs, weights=support))
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
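    # (with piecewise-constant weights of .1, .2 and .3, the weighted
    #  confusion matrix is the correspondingly weighted sum of the per-block
    #  unweighted confusion matrices)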
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
# a label not in y_true should result in zeros for that row/column
extra_label = np.max(y_true) + 1
cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
assert_array_equal(cm, [[18, 0],
[0, 0]])
# check for exception when none of the specified labels are in y_true
assert_raises(ValueError, confusion_matrix, y_true, y_pred,
labels=[extra_label, extra_label + 1])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
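    # (y1 and y2 differ in exactly one of the 2 * 3 = 6 label positions)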
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
assert_warns(DeprecationWarning, hamming_loss, y1, y2, classes=[0, 1])
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
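    # (per-sample Jaccard scores are 1 / 2 and 2 / 2, so the mean is 0.75)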
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
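    # (F-beta in terms of the micro-averaged p and r is
    #  (1 + beta**2) * p * r / (beta**2 * p + r); beta = 2 gives the
    #  expression above)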
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='binary')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='binary')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_binary_data_non_binary():
# Error if user does not explicitly set non-binary average mode
y_true_mc = [1, 2, 3, 3]
y_pred_mc = [1, 2, 3, 1]
y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
for y_true, y_pred, y_type in [
(y_true_mc, y_pred_mc, 'multiclass'),
(y_true_ind, y_pred_ind, 'multilabel-indicator'),
]:
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
assert_raise_message(ValueError,
"Target is %s but average='binary'. Please "
"choose another average setting." % y_type,
metric, y_true, y_pred)
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
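    # (per-sample hinge losses max(0, 1 - y * f(x)) are [0, 0.5, 0, 0.7];
    #  their mean is 1.2 / 4)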
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
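    # Reference losses computed by hand: each term is 1 minus the true-class
    # decision value plus the largest decision value among the other classes
    # (the hard-coded index below), clipped at zero further down.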
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
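    # (classes are ordered alphabetically, so column 0 is "no" and column 1
    #  is "yes"; the loss is -mean(log(predicted probability of the true
    #  class)))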
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = ('y_true contains only one label (2). Please provide '
'the true labels explicitly through the labels argument.')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = ('Found input variables with inconsistent numbers of samples: '
'[3, 2]')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
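    # (the Brier score is the mean squared difference between the predicted
    #  probability and the actual binary outcome)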
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
# calculate even if only single class in y_true (#6980)
assert_almost_equal(brier_score_loss([0], [0.5]), 0.25)
assert_almost_equal(brier_score_loss([1], [0.5]), 0.25)
|
bsd-3-clause
|
youprofit/shogun
|
examples/undocumented/python_modular/graphical/multiclass_qda.py
|
26
|
3294
|
"""
Shogun demo
Fernando J. Iglesias Garcia
"""
import numpy as np
import matplotlib as mpl
import pylab
import util
from scipy import linalg
from modshogun import QDA
from modshogun import RealFeatures, MulticlassLabels
# colormap
cmap = mpl.colors.LinearSegmentedColormap('color_classes',
{'red': [(0, 1, 1),
(1, .7, .7)],
'green': [(0, 1, 1),
(1, .7, .7)],
'blue': [(0, 1, 1),
(1, .7, .7)]})
pylab.cm.register_cmap(cmap = cmap)
# Generate data from Gaussian distributions
def gen_data():
np.random.seed(0)
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([-4, 3]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-1, -5]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([3, 4])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def plot_data(qda, X, y, y_pred, ax):
X0, X1, X2 = X[y == 0], X[y == 1], X[y == 2]
# Correctly classified
tp = (y == y_pred)
tp0, tp1, tp2 = tp[y == 0], tp[y == 1], tp[y == 2]
X0_tp, X1_tp, X2_tp = X0[tp0], X1[tp1], X2[tp2]
# Misclassified
X0_fp, X1_fp, X2_fp = X0[tp0 != True], X1[tp1 != True], X2[tp2 != True]
# Class 0 data
pylab.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color = cols[0])
pylab.plot(X0_fp[:, 0], X0_fp[:, 1], 's', color = cols[0])
m0 = qda.get_mean(0)
pylab.plot(m0[0], m0[1], 'o', color = 'black', markersize = 8)
# Class 1 data
pylab.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color = cols[1])
pylab.plot(X1_fp[:, 0], X1_fp[:, 1], 's', color = cols[1])
m1 = qda.get_mean(1)
pylab.plot(m1[0], m1[1], 'o', color = 'black', markersize = 8)
# Class 2 data
pylab.plot(X2_tp[:, 0], X2_tp[:, 1], 'o', color = cols[2])
pylab.plot(X2_fp[:, 0], X2_fp[:, 1], 's', color = cols[2])
m2 = qda.get_mean(2)
pylab.plot(m2[0], m2[1], 'o', color = 'black', markersize = 8)
def plot_cov(plot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0]) # rad
angle = 180 * angle / np.pi # degrees
    # Filled Gaussian ellipse at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2*v[0]**0.5, 2*v[1]**0.5, 180 + angle, color = color)
ell.set_clip_box(plot.bbox)
ell.set_alpha(0.5)
plot.add_artist(ell)
def plot_regions(qda):
nx, ny = 500, 500
x_min, x_max = pylab.xlim()
y_min, y_max = pylab.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
dense = RealFeatures(np.array((np.ravel(xx), np.ravel(yy))))
dense_labels = qda.apply(dense).get_labels()
Z = dense_labels.reshape(xx.shape)
pylab.pcolormesh(xx, yy, Z)
pylab.contour(xx, yy, Z, linewidths = 3, colors = 'k')
# Number of classes
M = 3
# Number of samples of each class
N = 300
# Dimension of the data
dim = 2
cols = ['blue', 'green', 'red']
fig = pylab.figure()
ax = fig.add_subplot(111)
pylab.title('Quadratic Discriminant Analysis')
X, y = gen_data()
labels = MulticlassLabels(y)
features = RealFeatures(X.T)
qda = QDA(features, labels, 1e-4, True)
qda.train()
ypred = qda.apply().get_labels()
plot_data(qda, X, y, ypred, ax)
for i in range(M):
plot_cov(ax, qda.get_mean(i), qda.get_cov(i), cols[i])
plot_regions(qda)
pylab.connect('key_press_event', util.quit)
pylab.show()
|
gpl-3.0
|
machinelearningnanodegree/stanford-cs231
|
solutions/levin/assignment1/features/featuresmodel.py
|
1
|
8804
|
import sys
import os
from astropy.units import ys
sys.path.insert(0, os.path.abspath('..'))
import random
import numpy as np
from assignment1.cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from assignment1.cs231n import data_utils
from assignment1.cs231n.features import *
from utility.dumpload import DumpLoad
from assignment1.cs231n.classifiers.linear_classifier import LinearSVM
from assignment1.cs231n.classifiers.neural_net import TwoLayerNet
import cv2
class FeaturesModel(object):
def __init__(self):
return
def get_CIFAR10_data(self, num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = '../cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
self.X_train = X_train
self.y_train = y_train
self.X_val = X_val
self.y_val = y_val
self.X_test = X_test
self.y_test = y_test
return
def save_sample_images(self):
cifar10_dir = '../cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
num_training = X_train.shape[0]
sample_indiecs = np.random.choice(num_training, size=5)
sample_images= X_train[sample_indiecs]
img_id = 0
for sample in sample_images:
img_id += 1
image_name = './temp/img_' + str(img_id) + '.jpg'
cv2.imwrite(image_name,sample)
return
def explore_sift(self):
cifar10_dir = '../cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
sift_len = []
for img in X_train:
sift = cv2.SIFT()
gray= cv2.cvtColor(img.astype(np.uint8),cv2.COLOR_BGR2GRAY)
kp = sift.detect(gray,None)
kp,des = sift.compute(gray, kp)
if len(kp) == 0:
image_name = './temp/zero_sift'+ '.jpg'
cv2.imwrite(image_name, img)
return
sift_len.append(len(kp))
print min(sift_len)
print max(sift_len)
print np.mean(sift_len)
return
def extract_features(self):
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
self.X_train_feats = extract_features(self.X_train, feature_fns, verbose=True)
self.X_val_feats = extract_features(self.X_val, feature_fns)
self.X_test_feats = extract_features(self.X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(self.X_train_feats, axis=0, keepdims=True)
self.X_train_feats -= mean_feat
self.X_val_feats -= mean_feat
self.X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(self.X_train_feats, axis=0, keepdims=True)
self.X_train_feats /= std_feat
self.X_val_feats /= std_feat
self.X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
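        # (appending a constant 1 to every feature vector lets the linear
        # classifiers learn the bias as part of the weight matrix)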
self.X_train_feats = np.hstack([self.X_train_feats, np.ones((self.X_train_feats.shape[0], 1))])
self.X_val_feats = np.hstack([self.X_val_feats, np.ones((self.X_val_feats.shape[0], 1))])
self.X_test_feats = np.hstack([self.X_test_feats, np.ones((self.X_test_feats.shape[0], 1))])
return
def load_data(self):
dump_load = DumpLoad('./temp/hogsdata.pickle')
if not dump_load.isExisiting():
self.get_CIFAR10_data()
self.extract_features()
preprocessed_dataset = self.X_train_feats, self.y_train, self.X_val_feats,self.y_val, self.X_test_feats,self.y_test, self.X_test
dump_load.dump(preprocessed_dataset)
self.X_train_feats, self.y_train, self.X_val_feats,self.y_val, self.X_test_feats,self.y_test,self.X_test = dump_load.load()
return
def visulize_mistake(self,y_test_pred):
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((self.y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(self.X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
return
def svm_classifier(self):
learning_rates = [1e-7, 3e-7,5e-7]
regularization_strengths = [5e4, 1e4]
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
num_iters = 1000
for learning_rate in learning_rates:
for regularization_strength in regularization_strengths:
                print "learning_rate {:.2e}, regularization_strength {:.2e}".format(learning_rate, regularization_strength)
#train it
svm = LinearSVM()
svm.train(self.X_train_feats, self.y_train, learning_rate=learning_rate, reg=regularization_strength,
num_iters=num_iters, verbose=True)
#predict
y_train_pred = svm.predict(self.X_train_feats)
training_accuracy = np.mean(self.y_train == y_train_pred)
y_val_pred = svm.predict(self.X_val_feats)
validation_accuracy = np.mean(self.y_val == y_val_pred)
results[(learning_rate,regularization_strength)] = training_accuracy, validation_accuracy
                print "train accuracy {}, validation {}".format(training_accuracy, validation_accuracy)
if validation_accuracy > best_val:
best_val = validation_accuracy
best_svm = svm
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(self.X_test_feats)
test_accuracy = np.mean(self.y_test == y_test_pred)
print test_accuracy
# self.visulize_mistake(y_test_pred)
return
def neural_network_classifier(self):
input_dim = self.X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
num_iters = 1800
batch_size=200
# hyperparameters
learning_rate = 5e-1
reg = 1e-6
learning_rate_decay = 0.95
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
net.train(self.X_train_feats, self.y_train, self.X_val_feats, self.y_val,
num_iters=num_iters,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_decay= learning_rate_decay,
reg=reg,
verbose=False)
# Predict on the validation set
val_acc = (net.predict(self.X_val_feats) == self.y_val).mean()
train_acc = (net.predict(self.X_train_feats) == self.y_train).mean()
print 'Train accuracy:{}, Validation accuracy:{}'.format(train_acc, val_acc)
test_acc = (net.predict(self.X_test_feats) == self.y_test).mean()
print test_acc
return
def run(self):
# self.explore_sift()
# self.save_sample_images()
self.load_data()
# self.svm_classifier()
self.neural_network_classifier()
return
if __name__ == "__main__":
obj= FeaturesModel()
obj.run()
|
mit
|
anielsen001/scipy
|
scipy/interpolate/_cubic.py
|
10
|
29293
|
"""Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using PCHIP algorithm [1]_.
    Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    be the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
DOI:10.1137/0717021
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
DOI: http://dx.doi.org/10.1137/1.9780898717952
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, by using
        # the PCHIP algorithm:
# We choose the derivatives at the point x_k by
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
    curve through a few given points.
References
----------
[1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) < 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
        # These are the indices where the slope at the breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
        * 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
        * 'clamped': The first derivative at the curve ends is zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
        * 'natural': The second derivative at the curve ends is zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
        tuple `(order, deriv_values)` that allows specifying arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
    Parameters `bc_type` and `extrapolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
    When the 'not-a-knot' boundary condition is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
    You can see that the spline continuity property holds for the first and
    second derivatives and is violated only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
    interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
raise ValueError("`x` must be strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
# This is a very special case, when both conditions are 'not-a-knot'
# and n == 3. In this case 'not-a-knot' can't be handled regularly
# as the both conditions are identical. We handle this case by
# constructing a parabola passing through given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
A = np.zeros((3, 3)) # This is a standard matrix.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
            y cast to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
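# Editor's note: a minimal, hedged usage sketch (not part of the original
# module). It exercises the 'clamped' boundary condition documented above on
# y = x**3, whose first derivatives at the interval ends are 0 and 3.
def _demo_clamped_cubic_spline():
    """Illustrative only: a clamped spline reproduces x**3 exactly."""
    x = np.linspace(0., 1., 5)
    cs = CubicSpline(x, x**3, bc_type=((1, 0.0), (1, 3.0)))
    xs = np.linspace(0., 1., 50)
    return np.allclose(cs(xs), xs**3)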
|
bsd-3-clause
|
lekshmideepu/nest-simulator
|
pynest/examples/brette_gerstner_fig_3d.py
|
8
|
3010
|
# -*- coding: utf-8 -*-
#
# brette_gerstner_fig_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Testing the adapting exponential integrate and fire model in NEST (Brette and Gerstner Fig 3D)
----------------------------------------------------------------------------------------------
This example tests the adaptive exponential integrate-and-fire model (AdEx) according to
Brette and Gerstner [1]_ and reproduces Figure 3D of the paper.
Note that Brette and Gerstner give the value for `b` in `nA`.
To be consistent with the other parameters in the equations, `b` must be
converted to `pA` (pico Ampere).
References
~~~~~~~~~~
.. [1] Brette R and Gerstner W (2005). Adaptive exponential integrate-and-fire model as an effective
       description of neuronal activity. J. Neurophysiology. https://doi.org/10.1152/jn.00686.2005
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_exp")
###############################################################################
# Set the parameters of the neuron according to the paper.
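# Note (added): ``b`` below is given in pA; 80.5 pA corresponds to the paper's
# 0.0805 nA, as explained in the module docstring.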
neuron.set(V_peak=20., E_L=-60.0, a=80.0, b=80.5, tau_w=720.0)
###############################################################################
# Create and configure the stimulus which is a step current.
dc = nest.Create("dc_generator")
dc.set(amplitude=-800.0, start=0.0, stop=400.0)
###############################################################################
# We connect the DC generator to the neuron.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a ``voltmeter`` to sample the membrane potentials from the neuron
# in intervals of 0.1 ms.
voltmeter = nest.Create("voltmeter", params={'interval': 0.1})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.axis([0, 1000, -85, 0])
plt.show()
|
gpl-2.0
|
alisidd/tensorflow
|
tensorflow/examples/learn/text_classification_character_cnn.py
|
30
|
4292
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
the DBpedia dataset to predict the class of an entity from its description.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is a rough alternative to the Lua code available here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a ReLU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
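# Editor's note: hedged helper (not in the original example) that spells out
# how the layers above shrink the time dimension; the constants mirror
# MAX_DOCUMENT_LENGTH, FILTER_SHAPE1 and POOLING_STRIDE defined earlier.
def _cnn_layer1_output_length(doc_length=MAX_DOCUMENT_LENGTH,
                              filter_height=FILTER_SHAPE1[0],
                              pool_stride=POOLING_STRIDE):
  """Illustrative only: time-axis length after CNN_Layer1 (conv + max-pool)."""
  conv_len = doc_length - filter_height + 1  # VALID convolution: 100 -> 81
  # With SAME padding only the stride affects the output length: ceil(81 / 2) = 41.
  return -(-conv_len // pool_stride)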
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
ledo01/Bulle-simulation
|
reso.py
|
1
|
1243
|
import numpy as np # Array manipulation
from matplotlib.pyplot import * # Plotting
from math import pi
def detachement(X):
"""Determine avec la forme de la bulle si il y a un detachement"""
d = np.diff(X)
z = np.diff(np.nonzero(np.logical_and(d>-5e-9,d<5e-9)))
return np.any(np.diff(z) > 10)
def volume(X):
return sum(pi*X**2)
R = 0.776617175541 # Initial value of Ro
B = 1
h = 3.2 # Bubble height
dz = 0.0001
fig, (ax1,ax2) = subplots(1,2)
e = 1
# cmap = get_cmap('viridis')
# colors = [cmap(i) for i in np.linspace(0,1,50)]
# ax.set_color_cycle(colors) # set_prop_cycle ??
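# Editor's note (added): the loop below is effectively a shooting method. For
# a trial value of R, the bubble profile X(z) is integrated with explicit
# Euler steps of size dz, the resulting width is compared with the target
# value 2, and R is nudged proportionally to the error until convergence.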
while abs(e) > 1e-10:  # Repeat the solve until the error is < 1e-10
z = np.arange(0,h,dz)
X = np.zeros(np.size(z))
x = 0.00001
xp = 0
for i in range(np.size(z)):
xpp = ((1+xp**2)/(x)) + ((B*z[i] - 2/R)*((1+xp**2)**(1.5)))
xp += xpp*dz
x += xp*dz
X[i] = h-x
Xl = X[::-1] - h
Xr = h -X[::-1]
    largeur = Xr[0] - Xl[0]  # Compute the bubble width
    # Adjust Ro based on the width error
e = 2 - largeur
R += 0.1 * e
ax1.plot(Xl,z,'b',label=r'$h = $%d'%(h))
ax1.plot(Xr,z,'b')
ylabel('y*')
xlabel('x*')
ax2.plot(np.diff(Xl))
show()
print(volume(Xl))
|
mit
|
cisco-oss-eng/perftools
|
perfwhiz/perfmap.py
|
2
|
13325
|
#!/usr/bin/env python
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Author: Alec Hothan
# ---------------------------------------------------------
from optparse import OptionParser
import os
import sys
import warnings
import pandas
from pandas import DataFrame
import json
import traceback
from perf_formatter import open_cdict
from perfmap_common import set_html_file
from perfmap_common import DfDesc
from perfmap_common import output_svg_html
from perfmap_core import get_coremaps
from perfmap_kvm_exit_types import get_swkvm_data
from perfmap_sw_kvm_exits import get_sw_kvm_events
from jinja2 import Environment
from jinja2 import FileSystemLoader
import time
from __init__ import __version__
from pkg_resources import resource_filename
import zlib
import base64
# Global variables
output_chart = None
# start analysis after first from_time usec
from_time = 0
# cap input file to first cap_time usec, 0 = unlimited
cap_time = 0
def get_full_task_name(df, task):
# if task is a number it is considered to be a pid ID
# if text it is a task name
try:
tid = int(task)
# tid given
df = df[df['pid'] == tid]
task = None
# get the task name from the tid
except ValueError:
# task given: find corresponding tid
df = df[df['task_name'] == task]
tid = 0
if df.empty:
print 'No selection matching the task ' + task
return (None, None)
# fill in the missing information
if not tid:
tid = df['pid'].iloc[0]
if not task:
task = df['task_name'].iloc[0]
task = task + ':' + str(tid)
return (df, task)
def show_successors(df, task, label):
df, task = get_full_task_name(df, task)
if not task:
return
df = df[df['event'] == 'sched__sched_switch']
# aggregate all the per core tasks (e.g. swapper/0 -> swapper)
df['next_comm'] = df['next_comm'].str.replace(r'/.*$', '')
series_percent = df.next_comm.value_counts(normalize=True)
series_count = df.next_comm.value_counts()
series_percent = pandas.Series(["{0:.2f}%".format(val * 100) for val in series_percent],
index=series_percent.index)
series_percent.name = 'percent'
series_count.name = 'count'
print 'Successors of %s (%s)' % (task, label)
print pandas.concat([series_count, series_percent], axis=1)
def set_short_names(dfds):
'''
    Reduce the names of a list of dataframe descriptors to their minimal non-matching parts.
    This trims the common prefix and suffix shared by all names and keeps only the
    distinguishing middle part of each name.
    Example of names: ../../haswell/h1x216.cdict ../../haswell/h5x113.cdict
    Resulting reduced names: h1x216 h5x113
    Caveat: never strip numeric digits at either end, e.g. 01x218.cdict 02x208.cdict
    must return [1x218, 2x208]; the trailing 8 must not be cut out
'''
name_list = [dfd.name for dfd in dfds]
if len(name_list) < 2:
return
strip_head = None
strip_tail = None
for name in name_list:
if not strip_head:
strip_head = name
strip_tail = name
continue
        # because there are no duplicates we know there is
        # at least a 1-character difference between all keys
# find longest match from head
max_index = min(len(name), len(strip_head))
for index in range(max_index):
if name[index] != strip_head[index] or name[index].isdigit():
strip_head = name[:index]
break
# find longest match from tail
max_index = min(len(name), len(strip_tail))
for index in range(-1, -max_index - 1, -1):
# stop when mismatch or when hitting a numeric letter
if name[index] != strip_tail[index] or name[index].isdigit():
if index == -1:
strip_tail = ''
else:
strip_tail = name[1 + index:]
break
# strip all names and store in the short_name field
for dfd in dfds:
dfd.short_name = dfd.name[len(strip_head):]
if strip_tail:
dfd.short_name = dfd.short_name[:-len(strip_tail)]
def get_info(dfd, label, max_core=32):
# allow at least 32 cores
if max_core < 32:
max_core = 32
# Other misc information in the chart
return {
"label": label,
"window": "{:,d}".format((dfd.to_usec - dfd.from_usec) / 1000),
"date": time.strftime("%d-%b-%Y"), # 01-Jan-2016 format
"max_cores": max_core,
"version": __version__
}
def get_tpl(tpl_file):
local_path = resource_filename(__name__, tpl_file)
template_loader = FileSystemLoader(searchpath=os.path.dirname(local_path))
template_env = Environment(loader=template_loader, trim_blocks=True, lstrip_blocks=True)
return template_env.get_template(tpl_file)
def create_charts(dfds, cap_time_usec, task_re, label):
coremaps, max_core = get_coremaps(dfds, cap_time_usec, task_re)
task_list, exit_reason_list, colormap_list = get_swkvm_data(dfds, cap_time_usec, task_re)
tpl = get_tpl('perfmap_charts.jinja')
svg_html = tpl.render(exit_reason_list=str(exit_reason_list),
task_list=task_list,
colormap_list=str(colormap_list),
coremaps=coremaps,
info=get_info(dfds[0], label, max_core))
output_svg_html(svg_html, 'charts', task_re)
def create_heatmaps(dfd, cap_time_usec, task_re, label):
swk_events = get_sw_kvm_events(dfd, task_re)
tpl = get_tpl('perfmap_heatmaps.jinja')
json_swk = json.dumps(swk_events, separators=(',', ':'))
b64_zlib = base64.b64encode(zlib.compress(json_swk))
svg_html = tpl.render(swk_events=b64_zlib,
info=get_info(dfd, label))
output_svg_html(svg_html, 'heatmaps', task_re)
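# Editor's note: hedged round-trip sketch (not in the original). The heatmap
# payload built above is JSON that is zlib-compressed and base64-encoded
# before being embedded in the HTML template; decoding it back looks like:
def _decode_swk_payload(b64_zlib):
    """Illustrative only: invert the base64+zlib encoding used in create_heatmaps."""
    return json.loads(zlib.decompress(base64.b64decode(b64_zlib)))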
# ---------------------------------- MAIN -----------------------------------------
def main():
global from_time
global cap_time
# Suppress future warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
parser = OptionParser(usage="usage: %prog [options] <cdict_file1> [cdict_file2...]")
parser.add_option("-t", "--task",
dest="task",
metavar="task name (regex)",
help="selected task(s) (regex on task name)"
)
parser.add_option("--label",
dest="label",
metavar="label",
help="label for the title (defaults to the cdict file name)"
)
parser.add_option("--map",
dest="map",
action="store",
metavar="mapping csv file",
help="remap task names from mapping csv file"
)
parser.add_option("--output-dir",
dest="output_dir",
action="store",
metavar="dir pathname",
help="output all html files to the provided directory"
)
parser.add_option("--heatmaps",
dest="heatmaps",
action="store_true",
help="generate heatmap charts only (default: generate basic charts only)"
)
parser.add_option("--headless",
dest="headless",
action="store_true",
help="do not show chart in the browser (default=False)"
)
parser.add_option("-c", "--cap",
dest="cap_time",
help="(optional) cap the analysis to first <cap_time> msec"
" of capture (default=all)"
)
parser.add_option("-f", "--from",
dest="from_time",
help="(optional) start the analysis after first <from_time> msec"
" of capture (default=0)"
)
parser.add_option("--merge-sys-tasks",
dest="merge_sys_tasks",
action="store_true",
help="group all system tasks (e.g. swapper/0 -> swapper)"
)
parser.add_option("--append-tid",
dest="append_tid",
action="store_true",
help="append tid to task name (e.g. perf -> perf:2834)"
)
parser.add_option("--successors-of",
dest="successor_of_task",
help="only show list of successors of given tid or task name"
)
parser.add_option("--list",
dest="list",
action="store_true",
default=False,
help="only show list of all tasks with event count"
)
(options, args) = parser.parse_args()
if options.from_time:
from_time = int(options.from_time) * 1000
if options.cap_time:
# convert to usec
cap_time = int(options.cap_time) * 1000 + from_time
if not args:
print 'Missing cdict file'
sys.exit(1)
if options.output_dir:
if not os.path.isdir(options.output_dir):
print('Invalid output directory: ' + options.output_dir)
sys.exit(1)
dfds = []
cdict_files = args
if len(cdict_files):
if len(cdict_files) > 1:
html_filename = cdict_files[0] + '-diff'
else:
html_filename = cdict_files[0]
else:
html_filename = cdict_files[0] if len(cdict_files) else 'perfwhiz'
set_html_file(html_filename, options.headless, options.label, options.output_dir)
# get smallest capture window of all cdicts
min_cap_usec = 0
for cdict_file in cdict_files:
perf_dict = open_cdict(cdict_file, options.map)
df = DataFrame(perf_dict)
dfd = DfDesc(cdict_file, df, options.merge_sys_tasks, options.append_tid)
dfds.append(dfd)
last_usec = df['usecs'].iloc[-1]
if min_cap_usec == 0:
min_cap_usec = last_usec
else:
min_cap_usec = min(min_cap_usec, last_usec)
if from_time and from_time >= min_cap_usec:
print 'Error: from time cannot be larger than %d msec' % (min_cap_usec / 1000)
sys.exit(2)
# if a cap time is provided, always use that value (even if it is >min_cap_usec)
if not cap_time:
cap_time = min_cap_usec
# normalize all dataframes
for dfd in dfds:
dfd.normalize(from_time, cap_time)
# at this point some cdict entries may have "missing" data
# if the requested cap_time is > the cdict cap time
# the relevant processing will extrapolate when needed (and if possible)
# reduce all names to minimize the length of the cdict file name
set_short_names(dfds)
if not options.label:
if len(dfds) > 1:
options.label = 'diff'
else:
options.label = os.path.splitext(os.path.basename(cdict_file))[0]
if options.list:
print 'List of tids and task names sorted by context switches and kvm event count'
for dfd in dfds:
print dfd.name + ':'
res = dfd.df.groupby(['pid', 'task_name']).size()
res.sort_values(ascending=False, inplace=True)
pandas.set_option('display.max_rows', len(res))
print res
sys.exit(0)
if options.successor_of_task:
for dfd in dfds:
print dfd.name + ':'
show_successors(dfd.df, options.successor_of_task, options.label)
sys.exit(0)
# These options can be cumulative and all require a --task parameter to select tasks
if not options.task:
print '--task <task_regex> is required'
sys.exit(1)
# A common mistake is to forget the head "." before a star ("*.vcpu0")
# Better detect and fix to avoid frustration
if options.task.startswith("*"):
options.task = "." + options.task
# create heatmaps only if one cdict was given
if options.heatmaps:
if len(dfds) == 1:
create_heatmaps(dfds[0], cap_time, options.task, options.label)
else:
print 'Error: --heat-maps requires 1 cdict file only'
sys.exit(1)
else:
create_charts(dfds, cap_time, options.task, options.label)
if __name__ == '__main__':
try:
main()
except Exception as ex:
print
traceback.print_exc()
print
sys.exit(1)
|
apache-2.0
|
NLeSC/pointcloud-benchmark
|
python/pointcloud/run/results/plot_oracle_inc_wspeed.py
|
1
|
2786
|
#!/usr/bin/env python
################################################################################
# Created by Oscar Martinez #
# [email protected] #
################################################################################
import matplotlib, sys, numpy, os
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pointcloud import utils
inputFile = os.path.abspath(sys.argv[1])
command = 'cat ' + inputFile + ' | grep "Read "'
lines = utils.shellExecute(command).split('\n')
nums = []
reads = []
sorts = []
writes = []
for line in lines:
if line != '':
fields = line.split()
try:
num = float(fields[1].replace(',','')) / 1000000.
read = float(fields[3].replace(',',''))
sort = float(fields[6].replace(',',''))
write = float(fields[9].replace(',',''))
nums.append(num)
reads.append(read)
sorts.append(sort)
writes.append(write)
except:
print 'skipped line!'
xs = range(1,len(nums)+1)
nums = numpy.array(nums)
reads = numpy.array(reads)
sorts = numpy.array(sorts)
writes = numpy.array(writes)
totals = reads+sorts+writes
print
print '#Files = ' + str(len(nums))
print '#Points = ' + str(int(sum(nums))) + ' Mpts'
plots = [
('Reads', reads),
('Sorts', sorts),
('Writes', writes),
('Totals', totals),
]
fig = plt.figure(figsize = (15,7), dpi = 75)
ax1 = fig.add_subplot(111)
ax2 = ax1.twiny()
labels = []
rects = []
for i in range(len(plots)):
ys = nums / plots[i][1]
avg = ys[ys != numpy.inf].mean()
print 'Avg. ' + plots[i][0] + ' = ' + ('%.2f' % avg)
# if len(ys) > 10:
# print 'Last ' + plots[i][0] + ' = ' , ys[-10:-1]
# else:
# print 'Last ' + plots[i][0] + ' = ' + ('%.2f' % ys[-1])
print 'Last ' + plots[i][0] + ' = ' + ('%.2f' % ys[-1])
ax1.plot(xs, ys, alpha=0.6, linestyle = '-', color=utils.PCOLORS[i], label = plots[i][0])
rects.append(plt.Rectangle((0, 0), 1, 1, fc=utils.PCOLORS[i]))
labels.append(plots[i][0])
ax1.set_xlabel('File counter')
ax1.set_ylabel('Mpts/s')
if len(sys.argv) > 2:
(miny,maxy) = sys.argv[2].split(',')
ax1.set_ylim([float(miny), float(maxy)])
ax1.autoscale(axis='x', tight=True)
def cumulative_sum(values, start=0):
for v in values:
start += v
yield start
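# Editor's note (added): cumulative_sum() yields a running total; anums below
# is the cumulative point count in Mpts used to label the secondary x-axis.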
anums = list(cumulative_sum(nums))
ax2.plot(anums, anums) # Create a dummy plot
ax2.cla()
ax2.set_xlabel("#Mpts")
ax2.autoscale(axis='x', tight=True)
title = ax1.set_title("Oracle blocks loading")
title.set_y(1.1)
fig.subplots_adjust(top=0.85)
plt.legend(rects, labels)
#fig.gca().legend()
fig.savefig(inputFile+ '.png')
|
apache-2.0
|
derekjchow/models
|
research/tcn/estimators/svtcn_loss_test.py
|
5
|
3694
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for svtcn_loss.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from estimators import svtcn_loss
import tensorflow as tf
class SVTCNLoss(tf.test.TestCase):
def testSVTCNLoss(self):
with self.test_session():
num_data = 64
num_sequences = 2
num_data_per_seq = num_data // num_sequences
feat_dim = 6
margin = 1.0
times = np.tile(np.arange(num_data_per_seq, dtype=np.int32),
num_sequences)
times = np.reshape(times, [times.shape[0], 1])
sequence_ids = np.concatenate(
[np.ones(num_data_per_seq)*i for i in range(num_sequences)])
sequence_ids = np.reshape(sequence_ids, [sequence_ids.shape[0], 1])
pos_radius = 6
neg_radius = 12
embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
# Compute the loss in NP
# Get a positive mask, i.e. indices for each time index
# that are inside the positive range.
in_pos_range = np.less_equal(
np.abs(times - times.transpose()), pos_radius)
# Get a negative mask, i.e. indices for each time index
# that are inside the negative range (> t + (neg_mult * pos_radius)
# and < t - (neg_mult * pos_radius).
in_neg_range = np.greater(np.abs(times - times.transpose()), neg_radius)
sequence_adjacency = sequence_ids == sequence_ids.T
sequence_adjacency_not = np.logical_not(sequence_adjacency)
pdist_matrix = euclidean_distances(embedding, squared=True)
loss_np = 0.0
num_positives = 0.0
for i in range(num_data):
for j in range(num_data):
if in_pos_range[i, j] and i != j and sequence_adjacency[i, j]:
num_positives += 1.0
pos_distance = pdist_matrix[i][j]
neg_distances = []
for k in range(num_data):
if in_neg_range[i, k] or sequence_adjacency_not[i, k]:
neg_distances.append(pdist_matrix[i][k])
neg_distances.sort() # sort by distance
chosen_neg_distance = neg_distances[0]
for l in range(len(neg_distances)):
chosen_neg_distance = neg_distances[l]
if chosen_neg_distance > pos_distance:
break
loss_np += np.maximum(
0.0, margin - chosen_neg_distance + pos_distance)
loss_np /= num_positives
# Compute the loss in TF
loss_tf = svtcn_loss.singleview_tcn_loss(
embeddings=tf.convert_to_tensor(embedding),
timesteps=tf.convert_to_tensor(times),
pos_radius=pos_radius,
neg_radius=neg_radius,
margin=margin,
sequence_ids=tf.convert_to_tensor(sequence_ids),
multiseq=True
)
loss_tf = loss_tf.eval()
self.assertAllClose(loss_np, loss_tf)
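# Editor's note: hedged restatement (not part of the original test) of the
# negative-selection rule exercised above: among the candidate negatives,
# pick the smallest distance that still exceeds the positive distance
# ("semi-hard" negative), falling back to the largest candidate otherwise.
def _choose_negative_distance(pos_distance, neg_distances):
  """Illustrative only: mirrors the sorted scan in testSVTCNLoss."""
  for d in sorted(neg_distances):
    if d > pos_distance:
      return d
  return max(neg_distances)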
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
andyraib/data-storage
|
python_scripts/env/lib/python3.6/site-packages/matplotlib/tri/tricontour.py
|
10
|
10196
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.contour import ContourSet
from matplotlib.tri.triangulation import Triangulation
import matplotlib._tri as _tri
import numpy as np
class TriContourSet(ContourSet):
"""
Create and store a set of contour lines or filled regions for
a triangular grid.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw triangular grid contour lines or filled regions,
depending on whether keyword arg 'filled' is False
(default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in TriContourSet.tricontour_doc.
"""
ContourSet.__init__(self, ax, *args, **kwargs)
def _process_args(self, *args, **kwargs):
"""
Process args and kwargs.
"""
if isinstance(args[0], TriContourSet):
C = args[0].cppContourGenerator
if self.levels is None:
self.levels = args[0].levels
else:
tri, z = self._contour_args(args, kwargs)
C = _tri.TriContourGenerator(tri.get_cpp_triangulation(), z)
self._mins = [tri.x.min(), tri.y.min()]
self._maxs = [tri.x.max(), tri.y.max()]
self.cppContourGenerator = C
def _get_allsegs_and_allkinds(self):
"""
Create and return allsegs and allkinds by calling underlying C code.
"""
allsegs = []
if self.filled:
lowers, uppers = self._get_lowers_and_uppers()
allkinds = []
for lower, upper in zip(lowers, uppers):
segs, kinds = self.cppContourGenerator.create_filled_contour(
lower, upper)
allsegs.append([segs])
allkinds.append([kinds])
else:
allkinds = None
for level in self.levels:
segs = self.cppContourGenerator.create_contour(level)
allsegs.append(segs)
return allsegs, allkinds
def _contour_args(self, args, kwargs):
if self.filled:
fn = 'contourf'
else:
fn = 'contour'
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args,
**kwargs)
z = np.asarray(args[0])
if z.shape != tri.x.shape:
raise ValueError('z array must have same length as triangulation x'
' and y arrays')
self.zmax = z.max()
self.zmin = z.min()
if self.logscale and self.zmin <= 0:
raise ValueError('Cannot %s log of negative values.' % fn)
self._contour_level_args(z, args[1:])
return (tri, z)
tricontour_doc = """
Draw contours on an unstructured triangular grid.
:func:`~matplotlib.pyplot.tricontour` and
:func:`~matplotlib.pyplot.tricontourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
The triangulation can be specified in one of two ways; either::
tricontour(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
tricontour(x, y, ...)
tricontour(x, y, triangles, ...)
tricontour(x, y, triangles=triangles, ...)
tricontour(x, y, mask=mask, ...)
tricontour(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of
these possibilities.
The remaining arguments may be::
tricontour(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation. The level values are chosen
automatically.
::
tricontour(..., Z, N)
contour *N* automatically-chosen levels.
::
tricontour(..., Z, V)
draw contour lines at the values specified in sequence *V*,
which must be in increasing order.
::
tricontourf(..., Z, V)
fill the (len(*V*)-1) regions between the values in *V*,
which must be in increasing order.
::
tricontour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
``C = tricontour(...)`` returns a
:class:`~matplotlib.contour.TriContourSet` object.
Optional keyword arguments:
*colors*: [ *None* | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ *None* | Colormap ]
A cm :class:`~matplotlib.colors.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ *None* | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*levels* [level0, level1, ..., leveln]
A list of floating point numbers indicating the level
curves to draw, in increasing order; e.g., to draw just
the zero contour pass ``levels=[0]``
*origin*: [ *None* | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ *None* | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ *None* | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.colors.Colormap.set_under` and
:meth:`matplotlib.colors.Colormap.set_over` methods.
*xunits*, *yunits*: [ *None* | registered units ]
Override axis units by specifying an instance of a
:class:`matplotlib.units.ConversionInterface`.
tricontour-only keyword arguments:
*linewidths*: [ *None* | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
        If *linestyles* is *None*, 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
tricontourf-only keyword arguments:
*antialiased*: [ *True* | *False* ]
enable antialiasing
Note: tricontourf fills intervals that are closed at the top; that
is, for boundaries *z1* and *z2*, the filled region is::
z1 < z <= z2
There is one exception: if the lowest boundary coincides with
the minimum value of the *z* array, then that minimum value
will be included in the lowest interval.
**Examples:**
.. plot:: mpl_examples/pylab_examples/tricontour_demo.py
"""
def tricontour(ax, *args, **kwargs):
if not ax._hold:
ax.cla()
kwargs['filled'] = False
return TriContourSet(ax, *args, **kwargs)
tricontour.__doc__ = TriContourSet.tricontour_doc
def tricontourf(ax, *args, **kwargs):
if not ax._hold:
ax.cla()
kwargs['filled'] = True
return TriContourSet(ax, *args, **kwargs)
tricontourf.__doc__ = TriContourSet.tricontour_doc
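# Editor's note: a hedged usage sketch (not part of the original module). It
# assumes the Axes.tricontourf wrapper, which ultimately delegates to the
# tricontourf() function defined above.
def _demo_tricontourf():
    """Illustrative only: filled contours on a small unstructured point set."""
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    x = rng.uniform(-1, 1, 100)
    y = rng.uniform(-1, 1, 100)
    z = np.hypot(x, y)
    fig, ax = plt.subplots()
    cs = ax.tricontourf(x, y, z, 10)
    fig.colorbar(cs, ax=ax)
    return fig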
|
apache-2.0
|
deepakantony/sms-tools
|
lectures/04-STFT/plots-code/window-size.py
|
22
|
1498
|
import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 128
start = int(.81*fs)  # sample index must be an integer for slicing
x1 = x[start:start+N]
plt.figure(1, figsize=(9.5, 6))
plt.subplot(321)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x1*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x1, M = 128')
mX, pX = DF.dftAnal(x1, np.hamming(N), N)
plt.subplot(323)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX1')
plt.subplot(325)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX1')
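# Editor's note (added): the second analysis below repeats the DFT with a
# window of M = 1024 samples; the longer window narrows the main lobe and
# resolves harmonics that the 128-sample window above smears together.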
N = 1024
start = int(.81*fs)
x2 = x[start:start+N]
mX, pX = DF.dftAnal(x2, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x2*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x2), max(x2)])
plt.title('x2, M = 1024')
plt.subplot(324)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX2')
plt.subplot(326)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX2')
plt.tight_layout()
plt.savefig('window-size.png')
plt.show()
|
agpl-3.0
|
whiskie14142/SolarSystemVoyager
|
source/flightreview.py
|
1
|
11249
|
# -*- coding: utf-8 -*-
"""
flightreview module for SSVG (Solar System Voyager)
(c) 2016-2019 Shushi Uetsuki (whiskie14142)
"""
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import numpy as np
import matplotlib.pyplot as plt
import common
from twobodypred import TwoBodyPred
from globaldata import *
# Import followings
# g : container of global data
# erase_Ptrj()
# draw_Ptrj()
# erase_PKepler()
# draw_PKepler()
# erase_TKepler()
# draw_TKepler()
# remove_planets()
# replot_planets(jd)
# remove_time()
# replot_time(jd, ttype='')
# nowtimestr()
# nowtimestrf()
from ui.flightreviewcontrol import *
class FlightReviewControl(QDialog):
"""class for 'Flight Review' window
"""
def __init__(self, parent=None):
super().__init__(parent)
flags = self.windowFlags() ^ Qt.WindowContextHelpButtonHint
self.setWindowFlags(flags)
left = g.mainform.geometry().left()
top = g.mainform.geometry().top()
self.setGeometry(left, top+740, 640, 211)
self.ui = Ui_FlightReviewControl()
self.ui.setupUi(self)
self._translate = QtCore.QCoreApplication.translate
self.timecap_real = self._translate('flightreview.py', 'Real')
self.sysMes01 = self._translate('flightreview.py', 'Received: FLYTO record, from SSVG')
self.sysMes02 = self._translate('flightreview.py', 'Reviewing: FLYTO, Line {}')
# Get Settings of 'Look at' from showorbitsettings
if g.showorbitsettings is not None:
s = g.showorbitsettings
self.ui.tobarycenter.setChecked(s['SSB'])
self.ui.toprobe.setChecked(s['Probe'])
self.ui.totarget.setChecked(s['Target'])
self.ui.forward.clicked.connect(self.forward)
self.ui.backward.clicked.connect(self.backward)
self.ui.fastforward.clicked.connect(self.fastforward)
self.ui.fastbackward.clicked.connect(self.fastbackward)
self.ui.check_Ptrj.clicked.connect(self._statuschanged)
self.ui.check_PKepler.clicked.connect(self._statuschanged)
self.ui.check_TKepler.clicked.connect(self._statuschanged)
self.ui.showplanets.clicked.connect(self._statuschanged)
self.ui.tobarycenter.clicked.connect(self._statuschanged)
self.ui.toprobe.clicked.connect(self._statuschanged)
self.ui.totarget.clicked.connect(self._statuschanged)
self.artist_of_probe = None
self.artist_of_target = None
self.artist_of_sun = None
self.artist_of_epssinfo = None
self.tbpred = None
self.reset()
def reset(self):
self.c_index = 0
self.ui.backward.setEnabled(False)
self.ui.fastbackward.setEnabled(False)
self.redraw()
def redraw(self):
if g.myprobe is None:
QMessageBox.information(self, 'Info',
'You have no valid probe.', QMessageBox.Ok)
return
if not g.myprobe.onflight:
QMessageBox.information(self, 'Info',
'Your probe has no valid orbit.', QMessageBox.Ok)
return
if g.myprobe.trj_record[-1][0]['type'] != 'FLYTO':
QMessageBox.information(self, 'Info',
'Last maneuver was not FLYTO.', QMessageBox.Ok)
return
self.last_trj = g.probe_trj[-1][1:]
self.maninfo = g.probe_trj[-1][0]
self.start_time = self.last_trj[0][0]
self.ui.starttime.setText(common.jd2isot(self.start_time))
xs, ys, zs, ts = g.mytarget.points(self.start_time, g.ndata)
g.target_Kepler = [xs, ys, zs]
erase_Ptrj()
if self.ui.check_Ptrj.isChecked():
draw_Ptrj()
erase_TKepler()
if self.ui.check_TKepler.isChecked():
draw_TKepler()
self._redrawmark()
self.ui.sysMessage.appendPlainText(self.sysMes01)
self.ui.sysMessage.appendPlainText(self.sysMes02.format(g.nextman))
self.ui.sysMessage.centerCursor()
def _redrawmark(self):
c_time = self.last_trj[0][self.c_index]
delta_jd = c_time - self.start_time
self.ui.currenttime.setText(common.jd2isot(c_time))
self.ui.delta_t_edit.setText('{:.8f}'.format(delta_jd))
ppos = np.zeros(3)
pvel = np.zeros(3)
ppos[0] = self.last_trj[1][self.c_index]
ppos[1] = self.last_trj[2][self.c_index]
ppos[2] = self.last_trj[3][self.c_index]
pvel[0] = self.last_trj[4][self.c_index]
pvel[1] = self.last_trj[5][self.c_index]
pvel[2] = self.last_trj[6][self.c_index]
ssacc = self.last_trj[7][self.c_index]
erase_PKepler()
if self.ui.check_PKepler.isChecked():
if self.tbpred is None:
self.tbpred = TwoBodyPred(g.myprobe.name)
self.tbpred.fix_state(c_time, ppos, pvel)
x, y, z, t = self.tbpred.points(g.ndata_s)
g.probe_Kepler = [x, y, z]
draw_PKepler()
target_pos, target_vel = g.mytarget.posvel(c_time)
sun_pos, sun_vel = common.SPKposvel(10, c_time)
xlim = g.ax.get_xlim()
hw = (xlim[1] - xlim[0]) * 0.5
if self.ui.tobarycenter.isChecked():
cent = [0.0, 0.0, 0.0]
elif self.ui.toprobe.isChecked():
cent = ppos
else:
cent = target_pos
g.ax.set_xlim(cent[0]-hw, cent[0]+hw)
g.ax.set_ylim(cent[1]-hw, cent[1]+hw)
g.ax.set_zlim(cent[2]-hw, cent[2]+hw)
if self.artist_of_probe is not None:
self.artist_of_probe.remove()
self.artist_of_probe = None
self.artist_of_probe = g.ax.scatter(*ppos, s=40, c='r',
depthshade=False, marker='x')
if self.artist_of_target is not None:
self.artist_of_target.remove()
self.artist_of_target = None
self.artist_of_target = g.ax.scatter(*target_pos, s=50, c='g',
depthshade=False, marker='+')
if self.artist_of_sun is not None:
self.artist_of_sun.remove()
self.artist_of_sun = None
self.artist_of_sun = g.ax.scatter(*sun_pos, s=50, c='#FFAF00',
depthshade=False, marker='o')
if self.artist_of_epssinfo is not None:
self.artist_of_epssinfo.remove()
self.artist_of_epssinfo = None
epsstext = ''
if self.maninfo['epon']:
epsstext = epsstext + ' EP(' + self.maninfo['epmode'] + ')'
if self.maninfo['sson']:
epsstext = epsstext + ' SS({0}) SSacc={1:.3f}'.format(
self.maninfo['ssmode'], ssacc)
self.artist_of_epssinfo = g.ax.text(*ppos, epsstext, color='r',
fontsize=10)
# redraw planets
remove_planets()
if self.ui.showplanets.isChecked():
replot_planets(c_time)
remove_time()
replot_time(c_time, self.timecap_real)
if g.fig is not None: plt.draw()
# display relative position and velocity
rel_pos = target_pos - ppos
rel_pos = common.eclv2lv(rel_pos, ppos, pvel, sun_pos, sun_vel)
trange, tphi, telv = common.rect2polar(rel_pos)
rel_vel = target_vel - pvel
rel_vel = common.eclv2lv(rel_vel, ppos, pvel, sun_pos, sun_vel)
relabsvel, tvphi, tvelv = common.rect2polar(rel_vel)
losvel = np.dot(rel_vel, rel_pos) / trange
self.ui.RPTrange.setText('{:.3f}'.format(trange / 1000.0))
self.ui.RPTphi.setText('{:.2f}'.format(tphi))
self.ui.RPTelv.setText('{:.2f}'.format(telv))
self.ui.RVTvel.setText('{:.3f}'.format(relabsvel))
self.ui.RVTphi.setText('{:.2f}'.format(tvphi))
self.ui.RVTelv.setText('{:.2f}'.format(tvelv))
self.ui.LoSVvel.setText('{:.3f}'.format(losvel))
def forward(self):
if self.c_index + 1 < len(self.last_trj[0]):
self.c_index += 1
self._redrawmark()
self.ui.backward.setEnabled(True)
self.ui.fastbackward.setEnabled(True)
else:
return
if self.c_index + 1 == len(self.last_trj[0]):
self.ui.forward.setEnabled(False)
self.ui.fastforward.setEnabled(False)
def backward(self):
if self.c_index > 0:
self.c_index -= 1
self._redrawmark()
self.ui.forward.setEnabled(True)
self.ui.fastforward.setEnabled(True)
else:
return
if self.c_index == 0:
self.ui.backward.setEnabled(False)
self.ui.fastbackward.setEnabled(False)
def fastforward(self):
if self.c_index == len(self.last_trj[0]) - 1: return
hopping = self.ui.timescale.value()
self.c_index += hopping
if self.c_index >= len(self.last_trj[0]):
self.c_index = len(self.last_trj[0]) - 1
self._redrawmark()
self.ui.backward.setEnabled(True)
self.ui.fastbackward.setEnabled(True)
if self.c_index + 1 == len(self.last_trj[0]):
self.ui.forward.setEnabled(False)
self.ui.fastforward.setEnabled(False)
def fastbackward(self):
if self.c_index == 0: return
hopping = self.ui.timescale.value()
self.c_index -= hopping
if self.c_index < 0:
self.c_index = 0
self._redrawmark()
self.ui.forward.setEnabled(True)
self.ui.fastforward.setEnabled(True)
if self.c_index == 0:
self.ui.backward.setEnabled(False)
self.ui.fastbackward.setEnabled(False)
def _statuschanged(self):
erase_Ptrj()
if self.ui.check_Ptrj.isChecked():
draw_Ptrj()
erase_TKepler()
if self.ui.check_TKepler.isChecked():
draw_TKepler()
self.save_settings()
self._redrawmark()
def save_settings(self):
if g.showorbitsettings is not None:
s = g.showorbitsettings
s['SSB'] = self.ui.tobarycenter.isChecked()
s['Probe'] = self.ui.toprobe.isChecked()
s['Target'] = self.ui.totarget.isChecked()
g.showorbitsettings = s
def closeEvent(self, event):
g.flightreviewcontrol = None
event.accept()
if self.artist_of_probe is not None:
self.artist_of_probe.remove()
self.artist_of_probe = None
if self.artist_of_target is not None:
self.artist_of_target.remove()
self.artist_of_target = None
if self.artist_of_sun is not None:
self.artist_of_sun.remove()
self.artist_of_sun = None
if self.artist_of_epssinfo is not None:
self.artist_of_epssinfo.remove()
self.artist_of_epssinfo = None
erase_Ptrj()
erase_PKepler()
erase_TKepler()
remove_planets()
remove_time()
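# Editor's note: hedged helper (not part of the original module) spelling out
# the line-of-sight velocity used in _redrawmark: the component of the
# relative velocity along the probe-to-target direction.
def _los_velocity(rel_pos, rel_vel):
    """Illustrative only: scalar closing (<0) / opening (>0) speed."""
    return float(np.dot(rel_vel, rel_pos) / np.linalg.norm(rel_pos))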
|
gpl-3.0
|
vdods/vorpy
|
vorpy/experimental/J_vs_lambda.py
|
1
|
1140
|
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import sys
import vorpy.pickle
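# Editor's note (added): the script expects one pickle path per line on stdin,
# e.g. something like ``ls runs/*.pickle | python J_vs_lambda.py`` (illustrative).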
if __name__ == '__main__':
J_v = []
lam_v = []
for pickle_p in map(pathlib.Path, map(str.strip, sys.stdin.readlines())):
data_d = vorpy.pickle.unpickle(pickle_filename=str(pickle_p), log_out=sys.stdout)
J_v.append(data_d['J_initial'])
lam_v.append(data_d['lam'])
row_count = 1
col_count = 1
size = 8
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(size*col_count,size*row_count))
axis = axis_vv[0][0]
axis.set_title('(J,lambda)')
axis.scatter(J_v, lam_v)
plot_p = pathlib.Path('J_vs_lam.png')
fig.tight_layout()
plot_p.parent.mkdir(parents=True, exist_ok=True)
plt.savefig(str(plot_p), bbox_inches='tight')
print(f'wrote to file "{plot_p}"')
# VERY important to do this -- otherwise your memory will slowly fill up!
# Not sure which one is actually sufficient -- apparently none of them are, YAY!
plt.clf()
plt.cla()
plt.close()
plt.close(fig)
plt.close('all')
del fig
del axis_vv
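# Editorial sketch (not part of the original script; names are illustrative
# assumptions): it is plt.close(fig) / plt.close('all') that releases the
# figure manager pyplot keeps for every figure created through plt.subplots;
# clf()/cla() only clear the contents of the current figure/axes.  The helper
# below wraps the create/save/close cycle in one place so the cleanup cannot
# be forgotten.
def _save_scatter_plot(x_v, y_v, title, plot_p):
    # plot_p is assumed to be a pathlib.Path; plt is imported at the top of
    # this script.
    fig, axis = plt.subplots(1, 1, figsize=(8, 8))
    axis.set_title(title)
    axis.scatter(x_v, y_v)
    fig.tight_layout()
    plot_p.parent.mkdir(parents=True, exist_ok=True)
    fig.savefig(str(plot_p), bbox_inches='tight')
    plt.close(fig)  # releases the figure and its manager, freeing the memory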
|
mit
|
jereze/scikit-learn
|
sklearn/neighbors/graph.py
|
208
|
7031
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
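if __name__ == "__main__":
    # Editorial usage sketch (not part of the original module): contrast the
    # two modes and the include_self flag on a tiny dataset.  The data values
    # are purely illustrative.
    X_demo = [[0], [3], [1]]
    # 'connectivity' yields a 0/1 adjacency matrix; include_self=True keeps
    # each sample as its own first neighbor (the current default behaviour
    # described in the deprecation warning above).
    print(kneighbors_graph(X_demo, 2, mode='connectivity',
                           include_self=True).toarray())
    # 'distance' stores the actual Euclidean distances on the edges.
    print(kneighbors_graph(X_demo, 2, mode='distance',
                           include_self=False).toarray())
    # radius_neighbors_graph links every pair of points closer than radius.
    print(radius_neighbors_graph(X_demo, 1.5, mode='distance',
                                 include_self=False).toarray())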
|
bsd-3-clause
|
manashmndl/scikit-learn
|
examples/classification/plot_lda_qda.py
|
164
|
4806
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
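# Editorial note (not part of the original example): linalg.eigh below returns
# the covariance eigenvalues v (the variances along the principal axes) and
# the eigenvectors w.  Using 2 * v ** 0.5 for the ellipse width/height makes
# each full axis span two standard deviations (one sigma on either side of the
# mean), and the rotation angle is taken from the first eigenvector.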
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
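# Editorial note (not part of the original example): sklearn.lda.LDA and
# sklearn.qda.QDA are older import locations; in later scikit-learn releases
# the same estimators live in sklearn.discriminant_analysis as
# LinearDiscriminantAnalysis and QuadraticDiscriminantAnalysis.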
|
bsd-3-clause
|
lorgor/vulnmine
|
vulnmine/ml.py
|
1
|
14873
|
"""ml: Do Machine Learning Classification of test data.
Purpose
=======
The ml module does the ML Classification using the Random Forest
Classifier.
There are in fact 2 different models being used:
* CPE "Vendor" to SCCM "Publisher" matching
* CPE "Software" to SCCM "Software" inventory data
The appropriate model is chosen at class initialization.
Manually labelled data is used to update the data to be classified. This
eliminates "known" matches from further processing.
The actual ML classification is done.
Then data is post-processed. Any duplicate records are eliminated. Then the
manually matched data is appended to the new classified data to form the final
dataframe.
Public classes
==============
MLClassify ML Classification using Random Forest Classifier
Restrictions
------------
The following restrictions should be clearly understood and respected.
* Fields and features *have* to be exactly as specified (same order, same
names) as in the initialization code.
* Labelled data always has a "match" attribute field.
"""
import pandas as pd
import numpy as np
from sklearn.externals import joblib
import logging
import sys
import gbls
import utils
# Public classes
__all__ = (
    'MLClassify',
)
class MLClassify(object):
"""Match NVD CPE "Software" data to SCCM "Software" inventory data.
Actions
-------
* There are two different models being used for classification. The
appropriate model is input when the class is initialized. At the same
      time the key and feature lists are initialized according to the
model chosen.
* The new data to be classified is updated using manually labelled data.
This ensures that only new unclassified data is input to ML
classification.
* The Random Forest Classification algorithm is run.
    * The newly-classified test data is concatenated with the manually
labelled data. Duplicates are eliminated to form the final resulting
dataframe.
Methods
-------
__init__ The class constructor which:
- initializes logging
- initializes the key list and feature list variables
- reads in the ML Model serialized data.
upd_using_labelled_data
Updates data to be classified using the manually labelled data
ml_classify Do the ML Random Forest classification
post_process_matched_data
        Concatenate the newly classified data with the manually labelled
data. Eliminate any duplicate data records.
Exceptions
----------
IOError Log error message and ignore
Restrictions
------------
The key and feature field lists *must* be kept in the specified order
    since that is how the ML algorithm was originally trained.
(See the sccmgbl module for the corresponding definitions.)
Returns
-------
None
"""
def __init__(
self,
type_data=None,
mylogger=None
):
"""Initialize class by configuring logging, initializing dataframe.
Actions
-------
This is the class constructor. To initialize:
* Initialize logging
* Initialize class attributes: key_list, feature_list, attr_list
* Input the correct model from the serialized disk file.
I/P Parameters
--------------
type_data specifies which model is to be used:
'vendor' CPE Vendor - SCCM Publisher0
        'software' CPE Software - SCCM Software inventory
        mylogger logging object. If None, then a new object is initialized.
"""
# Configure logging
if mylogger is None:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(gbls.loglvl)
else:
self.logger = mylogger
# Initialize key list and feature list for this model
# Input the respective serialized ML model
self.logger.info(
'\n\nInitializing ML_Match class\n\n'
'Type of data to be classified: \n{0}\n\n'.format(type_data)
)
if (type_data == 'vendor'):
self._key_list = gbls.vendor_key_list
self._feature_list = gbls.vendor_feature_list
self._attr_list = gbls.vendor_attr_list
model = gbls.clf_vendor
elif (type_data == 'software'):
self._key_list = gbls.sft_key_list
self._feature_list = gbls.sft_feature_list
self._attr_list = gbls.sft_attr_list
model = gbls.clf_software
else:
self.logger.critical(
'*** Bad input when initializing the ML_Match class\n\n'
)
return None
# Input the serialized ML model
try:
self.__clf = joblib.load(model)
except IOError as e:
self.logger.critical(
'*** I/O error ML Model({0}): {1}\n\n'.format(
e.errno, e.strerror
)
)
except:
self.logger.critical(
'*** Unexpected error loading ML model: {0}\n\n'.format(
sys.exc_info()[0]))
raise
def upd_using_labelled_data(self, p_df_match, p_df_match_lbl):
"""Update the data to be matched with the manually labelled data.
Actions
-------
* Discard columns that are not in the key_list. Drop records with
null fields.
* Update the data to be matched (i.e. the cartesian product data)
with "match" values from the labelled data.
Returns
-------
Updated dataframe
"""
self.logger.info('\n\nEntering upd_using_labelled_data\n\n')
# force call-by-value
df_match = p_df_match.copy()
df_match_lbl = p_df_match_lbl.copy()
if df_match_lbl.empty:
self.logger.info('Input dataframe df_match_lbl is empty.')
return (df_match)
if df_match.empty:
self.logger.info('Input dataframe df_match is empty.')
return (df_match)
self.logger.info(
'Data set to be classified: {0}\n{1}\n{2}\n\n'
'Labelled data: \n{3}\n{4}\n{5}\n\n'.format(
df_match.shape,
df_match.columns,
df_match.apply(pd.Series.nunique),
df_match_lbl.shape,
df_match_lbl.columns,
df_match_lbl.apply(pd.Series.nunique)
)
)
        # Update the data to be matched with the results from the manual
        # labelling effort. After doing all this hard manual classification
        # effort, why not use it?
        #
        # df_match_upd = pd.merge(df_match, df_match_lbl, how='left',
        #                         on=['publisher0', 'vendor_X'])
# First keep only relevant columns from labelled data
df_match_lbl1 = df_match_lbl.loc[
:,
self._key_list + ['match']
]
# Next update new input data with known values from
# the labelled data
df_match_upd0 = pd.merge(
df_match,
df_match_lbl1,
how='left',
on=self._key_list
)
# drop records with null fields (if any)
df_match_upd = df_match_upd0.dropna(
how='any',
subset=self._attr_list
)
# check that only Match values changed
self.logger.debug(
'\n\nCheck that updating with the labelled '
'data did not add extra null records.\n'
'--Updated data set to be classified:'
'\n{0}\n{1}\n{2}\n\n'.format(
df_match_upd.shape,
df_match_upd.columns,
df_match_upd.apply(pd.Series.nunique)
)
)
return (df_match_upd)
def ml_classify(self, p_df_match_upd):
"""Do the Machine Learning Classification.
Actions
-------
* Separate the test data to be classified from the data which has
already been labelled.
* Format the test data as a numpy array and run the Random Forest
Classification algorithm.
* Update the I/P test dataframe with the classification match
results.
Returns
-------
Dataframe containing classified test data
Dataframe containing manually labelled data
"""
# Force call by value
df_match_upd = p_df_match_upd.copy()
# Do ML classification
self.logger.info('\n\nEntering ml_classify\n\n')
if df_match_upd.empty:
self.logger.critical(
'*** ML: Input dframe is empty!'
)
return(df_match_upd, df_match_upd)
# Separate out the test data (i.e. not yet classified)
df_match_test = df_match_upd[df_match_upd['match'].isnull()]
df_match_labelled = df_match_upd[
df_match_upd['match'].notnull()
]
if df_match_test.empty:
df_match_test2 = df_match_test.reset_index(drop=True)
self.logger.info(
'\n\n ML: No elements to classify!'
)
else:
self.logger.info(
'\nStarting ML matching\n\n'
)
# Format the test data feature set.
df_match_test1 = df_match_test[['match'] + self._feature_list]
# Convert to a numpy array for input to the ML algorithm
np_match_test1 = np.asarray(df_match_test1)
Xt = np_match_test1[:, 1:]
s_match_test = pd.Series(self.__clf.predict(Xt))
df_match_test2 = df_match_test.reset_index(drop=True)
df_match_test2['match'] = s_match_test
# Most, if not all, test data pairs will be rejected
# since the labelling effort was quite comprehensive
self.logger.info(
'\n\nResults of ML '
'classification: \n\n'
'Test data: {0}\n{1}\n '
'Labelled data: {2}\n{3}\n '
'Match counts: {4}\n'.format(
df_match_test2.shape,
df_match_test2.columns,
df_match_labelled.shape,
df_match_labelled.columns,
df_match_test2['match'].value_counts()
)
)
sample_size = min(
(df_match_test2.match == 1).sum(),
10
)
if sample_size > 0:
self.logger.info(
'\nSample matches: \n{0}\n\n'.format(
df_match_test2[
df_match_test2.match == 1
].sample(sample_size)
)
)
else:
self.logger.info(
'\nNo matches!'
)
return (df_match_test2, df_match_labelled)
def post_process_matched_data(
self,
p_df_match_test2,
p_df_match_lbl
):
"""Post-process the matched and labelled data.
Actions
-------
Concatenate the manually labelled data with the data freshly
classified by the ML algorithm.
Eliminate duplicate records, if any.
Keep +ve matches only.
Returns
-------
Dataframe containing consolidated match data: ML-classified test data
and labelled data
"""
# Force call-by-value
df_match_test2 = p_df_match_test2.copy()
df_match_lbl = p_df_match_lbl.copy()
# concatenate the two sets of classified data: the manual set, and
# the machine-classified one
self.logger.info('\n\nEntering post_process_matched_data\n\n')
if df_match_lbl.empty:
self.logger.info(
'Input dataframe df_match_labelled is empty.\n\n'
)
return (df_match_test2)
if df_match_test2.empty:
self.logger.info(
'Input dataframe df_match_test2 is empty.\n\n'
)
return (df_match_lbl)
self.logger.info(
'ML-matched data: {0}\n{1}\n{2}\n'
'Labelled data: {3}\n{4}\n{5}\n\n'.format(
df_match_test2.shape,
df_match_test2.columns,
df_match_test2.apply(pd.Series.nunique),
df_match_lbl.shape,
df_match_lbl.columns,
df_match_lbl.apply(pd.Series.nunique)
)
)
df_match_consol1 = pd.concat([
df_match_test2[
df_match_test2['match'].notnull()
],
df_match_lbl
],
ignore_index=True
)
# eliminate any possible remaining duplicate records
df_match_consol = df_match_consol1.drop_duplicates(self._key_list)
self.logger.info(
'\nConsolidated Matched dataframe: '
'\n{0}\n{1}\n{2}\n{3}\n\n'.format(
df_match_consol.shape,
df_match_consol.columns,
df_match_consol['match'].value_counts(),
df_match_consol.apply(pd.Series.nunique)
)
)
# Only interested in +ve matches
df_match_consol1 = df_match_consol[
df_match_consol['match'] == 1
]
self.logger.debug(
'\nFinal consolidated match data set \n{0}\n\n'.format(
df_match_consol1.shape
)
)
return (df_match_consol1)
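# Editorial usage sketch (not part of vulnmine), shown as comments because a
# real run needs the serialized classifier files configured in gbls.  The
# dataframe names are illustrative assumptions: df_pairs is the CPE x SCCM
# candidate-pair dataframe, df_labelled the manually matched pairs.
#
#     clf = MLClassify(type_data='software')
#     df_updated = clf.upd_using_labelled_data(df_pairs, df_labelled)
#     df_test, df_known = clf.ml_classify(df_updated)
#     df_matches = clf.post_process_matched_data(df_test, df_known)
#
# df_matches then contains only the positive matches, de-duplicated on the
# model's key columns.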
|
gpl-3.0
|
khkaminska/scikit-learn
|
sklearn/utils/estimator_checks.py
|
31
|
52862
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
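if __name__ == "__main__":
    # Editorial usage sketch (not part of the original module): run the common
    # test-suite against a single estimator class, mirroring the documented
    # usage of check_estimator.
    from sklearn.svm import LinearSVC
    check_estimator(LinearSVC)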
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array that has only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array that has only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
        # is a possible RandomState instance, but in this check we have
        # explicitly fixed the random_state params recursively to integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
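# Hedged illustration (not part of the original test suite): a minimal,
# self-contained sketch of the parameter-immutability check above. It assumes
# LinearRegression as an arbitrary example estimator and that joblib's ``hash``
# is importable from ``sklearn.externals.joblib``.
def _example_fit_does_not_mutate_params():
    from copy import deepcopy
    import numpy as np
    from sklearn.externals.joblib import hash as joblib_hash
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    X, y = rng.randn(20, 3), rng.randn(20)
    est = LinearRegression()
    original_params = deepcopy(est.get_params())
    est.fit(X, y)
    new_params = est.get_params()
    # every constructor parameter should hash to the same value after fit
    return all(joblib_hash(new_params[key]) == joblib_hash(value)
               for key, value in original_params.items())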
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D,
    # so convert y to 2-D for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
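    # For example (illustrative only): for name == 'MultiTaskLasso', a 1-D
    # target np.array([1., 2., 3.]) of shape (3,) becomes a column vector of
    # shape (3, 1) via y[:, np.newaxis].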
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for at least one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
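# Hedged illustration (not part of the original test suite): the invariance
# checked above just means every shallow parameter also appears, unchanged,
# among the deep parameters. A minimal sketch using Pipeline and
# StandardScaler as arbitrary examples:
def _example_get_params_invariance():
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    pipe = Pipeline([('scale', StandardScaler())])
    shallow_params = pipe.get_params(deep=False)  # e.g. only 'steps'
    deep_params = pipe.get_params(deep=True)      # also 'scale', 'scale__with_mean', ...
    return all(item in deep_params.items() for item in shallow_params.items())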
|
bsd-3-clause
|
gotomypc/scikit-learn
|
examples/cluster/plot_dbscan.py
|
346
|
2479
|
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
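# (Illustrative addition, not in the original example.) DBSCAN labels noise
# points -1, so they can be counted directly:
n_noise_ = list(labels).count(-1)
print('Estimated number of noise points: %d' % n_noise_)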
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black is removed from the palette and used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
yashsharan/sympy
|
sympy/plotting/plot.py
|
8
|
65066
|
"""Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module provides only the essentials. For all the fancy stuff, use the
backend directly: you can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover, the data series classes have methods such as
``get_points``, ``get_segments`` and ``get_meshes`` that may be useful if you
wish to use another plotting library.
In particular, if you need publication-ready graphs and this module is not
enough for you, just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the most common way to graph data
in Python), copy ``_backend.fig``, which is the figure, and ``_backend.ax``,
which is the axes, and work on them as you would on any other matplotlib
objects.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
import inspect
from collections import Callable
import warnings
import sys
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the ``SymPyDocTestFinder`` in ``sympy/utilities/runtests.py``.
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
def _arity(f):
"""
    Python 2 and 3 compatible arity helper that does not raise a
    DeprecationWarning.
"""
if sys.version_info < (3,):
return len(inspect.getargspec(f)[0])
else:
param = inspect.signature(f).parameters.values()
return len([p for p in param if p.kind == p.POSITIONAL_OR_KEYWORD])
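# For example, ``_arity(lambda a, b: a + b)`` returns 2 and
# ``_arity(lambda a: a**2)`` returns 1; only positional-or-keyword parameters
# are counted.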
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
    It's the backend's responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
    # - get_meshes returning mesh_x (1D array), mesh_y (1D array),
    #   mesh_z (2D np.array)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
    #   used for calculating aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
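    # Worked example of the ``steps`` transform above (illustrative only):
    # for points x = [0, 1, 2], y = [4, 5, 6] the interleaving yields
    # x = [0, 1, 1, 2, 2] and y = [4, 4, 5, 5, 6], i.e. a staircase polyline.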
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = _arity(c)
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
                  (p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = _arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
    #The code is mostly a repetition of SurfaceOver2DRangeSeries.
    #XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z,
cmap=getattr(self.cm, 'viridis', self.cm.jet),
rstride=1, cstride=1, linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
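# For example, with ``import numpy as np``,
# ``centers_of_segments(np.array([0., 1., 3.]))`` returns ``array([0.5, 2. ])``,
# the midpoints of consecutive values.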
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
    # Workaround for plotting Piecewise (#8577): `lambdify` in
    # `.experimental_lambdify` fails to return numerical values in some cases.
    # A lower-level fix in `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
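# For example, with ``import numpy as np``:
#     flat(np.array([0., 0.]), np.array([1., 1.]), np.array([2., 2.]))  # True
#     flat(np.array([0., 0.]), np.array([1., 0.]), np.array([2., 1.]))  # False
# The first triple lies on a straight line (cos_theta == -1); the second
# does not.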
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
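# Illustrative note (assuming interval objects with ``start`` and ``end``
# attributes): each rectangle contributes [x0, x0, x1, x1, None] to ``xlist``
# and [y0, y1, y1, y0, None] to ``ylist``; the ``None`` separators let a
# single matplotlib ``fill`` call draw disjoint polygons.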
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    render the plot accurately. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence the
    same plot can appear slightly different across runs.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    render the plot accurately. The adaptive algorithm uses a random point
    near the midpoint of two points that has to be further sampled. Hence the
    same plot can appear slightly different across runs.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` number of points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` number of points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle expressions with number of expressions = 3. It is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
|
bsd-3-clause
|
rlowrance/re-avm
|
samples.py
|
1
|
13107
|
'''create training and test samples from the transactions
* add certain fields
* select transactions that contain "reasonable" values
INPUT FILES
INPUT/transactions-al-g-sfr.csv
OUTPUT FILES
WORKING/samples-test.csv
WORKING/samples-train.csv
WORKING/samples-train-validate.csv
WORKING/samples-validate.csv
'''
from __future__ import division
import cPickle as pickle
import datetime
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
import random
from sklearn import cross_validation
import sys
from Bunch import Bunch
from columns_contain import columns_contain
from Features import Features
from Logger import Logger
from ParseCommandLine import ParseCommandLine
from Path import Path
import layout_transactions as layout
cc = columns_contain
def usage(msg=None):
if msg is not None:
print msg
print 'usage : python samples.py [--test]'
print ' --test: run in test mode'
sys.exit(1)
def make_control(argv):
# return a Bunch
print argv
if len(argv) not in (1, 2):
usage('invalid number of arguments')
pcl = ParseCommandLine(argv)
arg = Bunch(
base_name=argv[0].split('.')[0],
test=pcl.has_arg('--test'),
)
random_seed = 123
random.seed(random_seed)
dir_working = Path().dir_working()
debug = False
out_file_name_base = ('testing-' if arg.test else '') + arg.base_name
return Bunch(
arg=arg,
debug=debug,
fraction_test=0.2,
max_sale_price=85e6, # according to Wall Street Journal
path_in=dir_working + 'transactions-al-g-sfr.csv',
path_out_info_reasonable=dir_working + out_file_name_base + '-info-reasonable.pickle',
path_out_test=dir_working + out_file_name_base + '-test.csv',
path_out_train=dir_working + out_file_name_base + '-train.csv',
path_out_train_validate=dir_working + out_file_name_base + '-train-validate.csv',
path_out_validate=dir_working + out_file_name_base + '-validate.csv',
random_seed=random_seed,
test=arg.test,
)
def report_and_remove(df, keep_masks):
'return new dataframe with just the kept rows AND info in the table that is printed'
print 'impact of individual masks'
format = '%40s removed %6d samples (%3d%%)'
info = {}
sorted_names = sorted([name for name in keep_masks.keys()])
for name in sorted_names:
keep_mask = keep_masks[name]
n_removed = len(df) - sum(keep_mask)
fraction_removed = n_removed / len(df)
print format % (name, n_removed, 100.0 * fraction_removed)
info[name] = fraction_removed
mm = reduce(lambda a, b: a & b, keep_masks.values())
total_removed = len(df) - sum(mm)
total_fraction_removed = total_removed / len(df)
print format % ('*** in combination', total_removed, 100.0 * total_fraction_removed)
r = df[mm]
return r, info
def check_never_missing(df, feature_names):
'verify that each ege feature is always present'
print
print 'Each of these fields should never be missing'
total_missing = 0
format_string = 'field %40s is missing %7d times'
for name in feature_names:
count_missing = sum(pd.isnull(df[name]))
print format_string % (name, count_missing)
total_missing += count_missing
print format_string % ('** total across fields **', total_missing)
print 'total missing', total_missing
if total_missing > 0:
pdb.set_trace() # should not be any missing
pass
def check_no_zeros(df, feature_names):
'check that only a few expected fields have zero values'
# TODO: some of the features can have zero values!
total_zeros = 0
format_string = 'field %40s is zero %7d times'
for name in feature_names:
if ('_has_' in name) or ('is_' in name):
continue # these are 0/1 indicator features
if name in (layout.building_bedrooms,
layout.building_basement_square_feet,
layout.building_fireplace_number,
layout.census2000_fraction_owner_occupied,
layout.has_pool,
layout.parking_spaces,):
            continue  # also, these are often zero
count_zero = sum(df[name] == 0)
print format_string % (name, count_zero)
total_zeros += count_zero
print format_string % ('** total across fields **', total_zeros)
if total_zeros > 0:
print 'found some unexpected zero values'
pdb.set_trace() # should not be any zeros
def check_feature_values(df):
# age features are added just before a model is fitted
feature_names = sorted([x for x, y in Features().ege() if 'age' not in x])
check_never_missing(df, feature_names)
check_no_zeros(df, feature_names)
def reasonable_feature_values(df, control):
'return new DataFrame containing sample in df that have "reasonable" values'
def below(percentile, series):
quantile_value = series.quantile(percentile / 100.0)
r = series < quantile_value
return r
# set mask value in m to True to keep the observation
m = {}
m['assessment total > 0'] = df[layout.assessment_total] > 0
m['assessment land > 0'] = df[layout.assessment_land] > 0
m['assessment improvement > 0'] = df[layout.assessment_improvement] > 0
m['baths > 0'] = df[layout.building_baths] > 0
m['effective year built >= year built'] = df[layout.year_built_effective] >= df[layout.year_built]
m['full price'] = layout.mask_full_price(df)
m['latitude known'] = layout.mask_gps_latitude_known(df)
m['longitude known'] = layout.mask_gps_longitude_known(df)
m['land size < 99th percentile'] = below(99, df[layout.lot_land_square_feet])
m['land size > 0'] = df[layout.lot_land_square_feet] > 0
m['living size < 99th percentile'] = below(99, df[layout.building_living_square_feet])
m['living square feet > 0'] = df[layout.building_living_square_feet] > 0
m['median household income > 0'] = df[layout.census2000_median_household_income] > 0
m['new or resale'] = layout.mask_new_or_resale(df)
m['one building'] = layout.mask_is_one_building(df)
m['one APN'] = layout.mask_is_one_parcel(df)
# m['recording date present'] = ~df[layout.recording_date + '_deed'].isnull() # ~ => not
m['price > 0'] = df[layout.price] > 0
m['price < max'] = df[layout.price] < control.max_sale_price
m['rooms > 0'] = df[layout.building_rooms] > 0
m['resale or new construction'] = (
layout.mask_is_new_construction(df) |
layout.mask_is_resale(df)
)
m['sale date present'] = layout.mask_sale_date_present(df)
m['sale date valid'] = layout.mask_sale_date_valid(df)
m['stories > 0'] = df[layout.building_stories] > 0
m['units == 1'] = df[layout.n_units] == 1
m['year_built > 0'] = df[layout.year_built] > 0
print 'effects of reasonable values'
return report_and_remove(df, m)
def add_features(df, control):
def split(date):
year = int(date / 10000)
md = date - year * 10000
month = int(md / 100)
day = md - month * 100
return year, month, day
def python_date(date):
'yyyymmdd --> datetime.date(year, month, day)'
year, month, day = split(date)
return datetime.date(int(year), int(month), int(day))
def year(date):
        'yyyymmdd --> year as an int'
year, month, day = split(date)
return year
def yyyymm(date):
year, month, day = split(date)
return year * 100 + month
# create sale-date related features
def append_column(name, values):
df.insert(len(df.columns),
name,
pd.Series(values, index=df.index),
)
sale_date = df[layout.sale_date]
sale_date_python = sale_date.apply(python_date)
append_column(layout.sale_date_python, sale_date_python)
append_column(layout.yyyymm, sale_date.apply(yyyymm))
# create age and similar fields
# NOTE: these are ages at the sale date
sale_year = sale_date.apply(year)
year_built = df[layout.year_built]
effective_year_built = df[layout.year_built_effective]
age = sale_year - year_built
effective_age = sale_year - effective_year_built
append_column(layout.age, age)
append_column(layout.age2, age * age)
append_column(layout.age_effective, effective_age)
append_column(layout.age_effective2, effective_age * effective_age)
# create indicator features
append_column(layout.is_new_construction, layout.mask_is_new_construction(df))
append_column(layout.is_resale, layout.mask_is_resale(df))
append_column(layout.building_has_basement, df[layout.building_basement_square_feet] > 0)
append_column(layout.building_has_fireplace, df[layout.building_fireplace_number] > 0)
append_column(layout.has_parking, df[layout.parking_spaces] > 0)
append_column(layout.has_pool, df[layout.pool_flag] == 'Y')
# create additional indicators aggregating certain PROPN codes
def create(new_column_base, ored_column_bases):
def create2(prefix):
def ored_name(ored_index):
return prefix + '_has_' + ored_column_bases[ored_index]
mask = df[ored_name(0)]
for index in range(1, len(ored_column_bases)):
mask2 = df[ored_name(index)]
mask = mask | mask2
append_column(prefix + '_has_' + new_column_base, mask)
for prefix in ('census_tract', 'zip5'):
create2(prefix)
create('any_commercial', ('commercial', 'commercial_condominium',))
create('any_industrial', ('industrial', 'industrial_light', 'industrial_heavy',))
create('any_non_residential', ('amusement', 'any_commercial', 'financial_institution', 'hotel',
'any_industrial', 'medical', 'office_building', 'parking',
'retail', 'service', 'transport', 'utilities', 'warehouse',))
def main(argv):
control = make_control(argv)
sys.stdout = Logger(base_name=control.arg.base_name)
print control
transactions = pd.read_csv(control.path_in,
nrows=100000 if control.arg.test else None,
)
print 'transactions shape', transactions.shape
after_2000_census_known = transactions[layout.mask_sold_after_2002(transactions)]
print 'after 2000 census known shape', after_2000_census_known.shape
subset, info_reasonable = reasonable_feature_values(after_2000_census_known, control)
print 'subset shape', subset.shape
add_features(subset, control)
check_feature_values(subset)
# split into test and train
# stratify by yyyymm (month of sale)
def count_yyyymm(df, yyyymm):
return sum(df[layout.yyyymm] == yyyymm)
def split(df, fraction_test):
sss = cross_validation.StratifiedShuffleSplit(
y=df.yyyymm,
n_iter=1,
test_size=fraction_test,
train_size=None,
random_state=control.random_seed,
)
assert len(sss) == 1
for train_index, test_index in sss:
train = df.iloc[train_index]
test = df.iloc[test_index]
return test, train
# split samples into test and train, stratified on month of sale
# then split training data on test (validate) and train
test, train = split(subset, control.fraction_test)
fraction_test = control.fraction_test / (1 - control.fraction_test)
train_test, train_train = split(train, fraction_test)
# write the csv files
test.to_csv(control.path_out_test)
train.to_csv(control.path_out_train)
train_test.to_csv(control.path_out_train_validate)
train_train.to_csv(control.path_out_validate)
# count samples in each strata (= month)
yyyymms = sorted(set(subset[layout.yyyymm]))
format_string = '%6d # total %6d # test %6d # train %6d # train_test %6d # train_train %6d'
for yyyymm in yyyymms:
c1 = count_yyyymm(subset, yyyymm)
c2 = count_yyyymm(test, yyyymm)
c3 = count_yyyymm(train, yyyymm)
c4 = count_yyyymm(train_test, yyyymm)
c5 = count_yyyymm(train_train, yyyymm)
print format_string % (yyyymm, c1, c2, c3, c4, c5)
if c1 != c2 + c3:
print 'c1 not exactly split'
pdb.set_trace()
if c3 != c4 + c5:
print 'c3 not exactly split'
pdb.set_trace()
print 'totals'
    print format_string % (0, len(subset), len(test), len(train), len(train_test), len(train_train))
f = open(control.path_out_info_reasonable, 'wb')
pickle.dump((info_reasonable, control), f)
f.close()
print control
if control.test:
print 'DISCARD OUTPUT: test'
print 'done'
return
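# Hedged sketch (not part of the original script): arithmetic behind the
# two-stage split in main().  The first split holds out fraction_test of all
# samples; the second uses fraction_test / (1 - fraction_test) of the training
# data, so the validation slice is again the same share of the full data set.
def _split_fractions(fraction_test):
    'return (test, train_validate, train_train) as fractions of all samples'
    train = 1.0 - fraction_test
    second = fraction_test / (1.0 - fraction_test)
    train_validate = train * second      # equals fraction_test
    train_train = train - train_validate
    return fraction_test, train_validate, train_train
# e.g. _split_fractions(0.2) -> roughly (0.2, 0.2, 0.6)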
if __name__ == '__main__':
if False:
# avoid pyflakes warnings
pdb.set_trace()
pprint()
pd.DataFrame()
np.array()
main(sys.argv)
|
bsd-3-clause
|
clemkoa/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
168
|
1793
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
|
bsd-3-clause
|
pratapvardhan/scikit-image
|
doc/examples/features_detection/plot_template.py
|
12
|
1790
|
"""
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2, adjustable='box-forced')
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
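# Hedged follow-up sketch (not part of the original example): when several
# matches are expected, a peak-finding routine such as
# skimage.feature.peak_local_max can be run on the match_template result.
# The min_distance and threshold values below are illustrative only.
from skimage.feature import peak_local_max

peaks = peak_local_max(result, min_distance=20, threshold_abs=0.8)
for row, col in peaks:
    # each peak marks the top-left corner of a candidate match
    ax2.add_patch(plt.Rectangle((col, row), wcoin, hcoin,
                                edgecolor='b', facecolor='none'))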
|
bsd-3-clause
|
bsipocz/scikit-image
|
doc/examples/plot_marked_watershed.py
|
7
|
1552
|
"""
===============================
Markers for watershed transform
===============================
The watershed is a classical algorithm used for **segmentation**, that
is, for separating different objects in an image.
Here a marker image is built from regions of low gradient inside the image.
See Wikipedia_ for more details on the algorithm.
.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)
"""
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.morphology import watershed, disk
from skimage import data
from skimage.filters import rank
from skimage.util import img_as_ubyte
image = img_as_ubyte(data.camera())
# denoise image
denoised = rank.median(image, disk(2))
# find continuous region (low gradient) --> markers
markers = rank.gradient(denoised, disk(5)) < 10
markers = ndi.label(markers)[0]
#local gradient
gradient = rank.gradient(denoised, disk(2))
# process the watershed
labels = watershed(gradient, markers)
# display results
fig, axes = plt.subplots(ncols=4, figsize=(8, 2.7))
ax0, ax1, ax2, ax3 = axes
ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax1.imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')
ax2.imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')
ax3.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax3.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7)
for ax in axes:
ax.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
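# Hedged follow-up sketch (not part of the original example): the segment
# boundaries can also be drawn directly on the original image with
# skimage.segmentation.mark_boundaries.
from skimage.segmentation import mark_boundaries

plt.figure()
plt.imshow(mark_boundaries(image, labels))
plt.axis('off')
plt.show()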
|
bsd-3-clause
|
terkkila/scikit-learn
|
sklearn/linear_model/__init__.py
|
270
|
3096
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
|
bsd-3-clause
|
mhue/scikit-learn
|
sklearn/linear_model/coordinate_descent.py
|
42
|
73973
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
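# Hedged illustration (not part of scikit-learn): for dense, already centered
# data with a single output, the largest alpha produced by _alpha_grid reduces
# to the closed form  alpha_max = max_j |x_j . y| / (n_samples * l1_ratio),
# the smallest penalty at which every coordinate-descent coefficient is zero.
def _alpha_max_dense(X, y, l1_ratio=1.0):
    """Minimal re-derivation of alpha_max for dense, centered X and 1-D y."""
    Xy = np.dot(X.T, y)
    return np.max(np.abs(Xy)) / (X.shape[0] * l1_ratio)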
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing this path. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||^2_Fro
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
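# Hedged usage sketch (not part of scikit-learn): a compact way to exercise
# enet_path on tiny dense data and check the shapes of its outputs.  The toy
# values below are illustrative only.
def _enet_path_demo():
    X = np.array([[1., 2., 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1., 2., 3.1])
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    # coefs has shape (n_features, n_alphas) for single-output y
    return alphas.shape, coefs.shape, dual_gaps.shape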
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
        Coordinate descent is an algorithm that considers each column of
        data at a time, hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
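# Hedged helper sketch (not part of scikit-learn): the ElasticNet docstring
# above notes that a penalty written as  a * L1 + b * L2  corresponds to
# alpha = a + b and l1_ratio = a / (a + b).
def _l1_l2_to_enet_params(a, b):
    """Convert separate L1/L2 penalty weights into (alpha, l1_ratio)."""
    alpha = a + b
    l1_ratio = a / float(alpha)
    return alpha, l1_ratio
# e.g. _l1_l2_to_enet_params(1.0, 3.0) -> (4.0, 0.25)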
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded into one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
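# Illustrative usage sketch, not part of the library source (assumes the public
# sklearn.linear_model import path): LassoCV picks ``alpha_`` by cross-validation
# and exposes the per-fold errors in ``mse_path_``.
#     >>> import numpy as np
#     >>> from sklearn.linear_model import LassoCV
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(50, 10)
#     >>> y = X[:, 0] + 0.1 * rng.randn(50)
#     >>> reg = LassoCV(cv=3).fit(X, y)
#     >>> reg.alpha_, reg.mse_path_.shape   # doctest: +SKIP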
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
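# Hedged sketch with example values (not library code): converting separate L1
# and L2 strengths ``a`` and ``b`` into the (alpha, l1_ratio) parametrization
# described in the ElasticNetCV Notes above.
#     >>> a, b = 1.0, 0.1                     # desired L1 and L2 weights
#     >>> alpha, l1_ratio = a + b, a / (a + b)
#     >>> enet = ElasticNetCV(l1_ratio=[l1_ratio], alphas=[alpha], cv=3)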
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
|
bsd-3-clause
|
tosolveit/scikit-learn
|
sklearn/utils/__init__.py
|
79
|
14202
|
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
add a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
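# Additional sketch (hypothetical class name): the decorator also wraps classes,
# replacing ``__init__`` so that instantiation emits a DeprecationWarning.
#     >>> @deprecated("use NewEstimator instead")
#     ... class OldEstimator(object):
#     ...     pass
#     >>> OldEstimator()   # doctest: +SKIP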
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
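# Illustrative sketch: a boolean mask is converted to integer indices when X is
# sparse (i.e. has ``toarray``), since sparse matrices of this vintage do not
# accept boolean masks directly.
#     >>> from scipy.sparse import csr_matrix
#     >>> safe_mask(csr_matrix(np.eye(3)), np.array([True, False, True]))
#     array([0, 2])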
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
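# Illustrative sketch: the same call works on plain lists and on ndarrays
# (pandas objects would take the ``.iloc`` branch above).
#     >>> safe_indexing([10, 20, 30], [2, 0])
#     [30, 10]
#     >>> safe_indexing(np.array([10, 20, 30]), np.array([2, 0]))
#     array([30, 10])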
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
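# Quick sketch of safe_sqr on a dense array (sparse input squares ``.data``
# element-wise in the same way).
#     >>> safe_sqr(np.array([1., 2., 3.]))
#     array([ 1.,  4.,  9.])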
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
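# Quick sketch of tosequence: generators are materialized to a list, while
# existing Sequences (e.g. lists) are returned unchanged.
#     >>> tosequence(i for i in range(3))
#     [0, 1, 2]
#     >>> tosequence([1, 2, 3])
#     [1, 2, 3]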
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
|
bsd-3-clause
|
aitoralmeida/c4a_data_repository
|
RestApiInterface/src/packActivityRecognition/kasteren_data_transformer.py
|
2
|
6051
|
# -*- coding: utf-8 -*-
"""
This tool is to transform Kasteren datasets to our csv format
"""
import sys, getopt
import numpy as np
import time, datetime
import pandas as pd
from copy import deepcopy
from pandas.tseries.resample import TimeGrouper
__author__ = 'Gorka Azkune'
__copyright__ = "Copyright 2017, City4Age project"
__credits__ = ["Rubén Mulero", "Aitor Almeida", "Gorka Azkune", "David Buján"]
__license__ = "GPL"
__version__ = "0.1"
__maintainer__ = "Gorka Azkune"
__email__ = "[email protected]"
__status__ = "Prototype"
"""
Function to parse arguments from command line
Input:
argv -> command line arguments
Output:
sense_file -> csv file with sensor data (start, end, sensor-id, value=1)
act_file -> csv file with activity data (start, end, activity-id)
output -> csv file where timestamped sensor activations are listed as
[timestamp, sensor, activity, start-end]
"""
def parseArgs(argv):
sense_file = ''
act_file = ''
output = ''
try:
opts, args = getopt.getopt(argv,"hs:a:o:",["sense=","act=","out="])
except getopt.GetoptError:
print 'kasteren_data_transformer.py -s <sense_dataset> -a <act_dataset> -o <output_dataset>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'kasteren_data_transformer.py -s <sense_dataset> -a <act_dataset> -o <output_dataset>'
sys.exit()
elif opt in ("-s", "--sense"):
sense_file = arg
elif opt in ("-a", "--act"):
act_file = arg
elif opt in ("-o", "--out"):
output = arg
return sense_file, act_file, output
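# Example invocation from the command line (file names are hypothetical):
#   python kasteren_data_transformer.py -s kasterenSenseData.txt \
#       -a kasterenActData.txt -o kasteren_transformed.csv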
"""
Function to detect and remove overlapping activities
Input:
act_df: pd.DataFrame with activities (start, end, activity)
act_dict: dictionary of activity ids and names {id:'Activity'}
Output:
act_df: filtered act_df, where overlapping activities do not exist
"""
def detectOverlappingActivities(act_df, act_dict):
# For testing purposes, remove index 53
act_df = act_df.drop([53])
for index in act_df.index:
start = act_df.loc[index, 'start']
end = act_df.loc[index, 'end']
act_id = act_df.loc[index, 'activity']
#print index, start < end
#print 'Activity', act_dict[act_id], 'start:', start, 'end:', end
overlapping = act_df[np.logical_and(act_df['start'] > start, act_df['end'] < end)]
#activity_index = filtered_df[np.logical_or(filtered_df['a_start_end'] == 'start', filtered_df['a_start_end'] == 'end')].index
if not overlapping.empty:
#print '--------------------------'
#print 'Activity', act_dict[act_id], start, end
#print 'Overlapping activities:'
#print overlapping.head()
act_df = act_df.drop([index])
#print 'Activities after removing overlapping'
#print act_df.head(50)
return act_df
"""
Dataset transformation function
"""
def transformDataset(sense_file, act_file):
# List of activities which we want to store in the transformed dataset
target_acts = [13, 5, 15]
# open sense dataset file
sense_df = pd.read_csv(sense_file, parse_dates=0, names=['start', 'end', 'sensor', 'value'])
print 'Sensor dataset:'
print sense_df.head()
# open activity dataset file
act_df = pd.read_csv(act_file, parse_dates=0, names=['start', 'end', 'activity'])
print 'Activity dataset'
print act_df.head()
# build sensor dictionary
sensor_dict = {1:'Microwave', 5:'HallToiletDoor', 6:'HallBathroomDoor', 7:'CupsCupboard', 8:'Fridge', 9:'PlatesCupboard', 12:'Frontdoor', 13:'Dishwasher', 14:'ToiletFlush', 17:'Freezer', 18:'PansCupboard', 20:'Washingmachine', 23:'GroceriesCupboard', 24:'HallBedroomDoor'}
# build activity dict
act_dict = {1:'LeaveHouse', 4:'UseToilet', 5:'TakeShower', 10:'GoToBed', 13:'PrepareBreakfast', 15:'PrepareDinner', 17:'GetDrink'}
#act_df = detectOverlappingActivities(act_df, act_dict)
#print 'Activities after removing overlapping'
#print act_df.head(50)
# Initialize transformed dataset
trans_df = pd.DataFrame(index = sense_df['start'].values)
sensors = sense_df['sensor'].values
snames = []
for i in xrange(len(sensors)):
snames.append(sensor_dict[sensors[i]])
trans_df['sensor'] = snames
trans_df['activity'] = ['None']*len(trans_df)
trans_df['start/end'] = ['']*len(trans_df)
#print 'Trans df:'
#print trans_df.head(50)
# Label each sensor activation with an activity name, using act_df start/end times
for index in act_df.index:
act_id = act_df.loc[index, 'activity']
try:
target_acts.index(act_id)
act_name = act_dict[act_id]
index_list = trans_df[np.logical_and(trans_df.index >= act_df.loc[index, 'start'], trans_df.index <= act_df.loc[index, 'end'])].index
#print 'Activity', act_name
#print index_list
trans_df.loc[index_list, 'activity'] = act_name
aux = 0
for i in index_list:
if aux == 0:
trans_df.loc[i, 'start/end'] = 'start'
elif aux == len(index_list) - 1:
trans_df.loc[i, 'start/end'] = 'end'
aux = aux + 1
except ValueError:
pass
# Remove all None actions to have a clear dataset (only activities)
trans_df = trans_df[trans_df['activity'] != 'None']
print 'Trans df:'
print trans_df.head(50)
return trans_df
"""
Main function
"""
def main(argv):
# call the argument parser
[sense_file, act_file, output] = parseArgs(argv[1:])
print 'Sense:', sense_file
print 'Act:', act_file
print 'Output:', output
dataset_df = transformDataset(sense_file, act_file)
dataset_df.to_csv(output)
if __name__ == "__main__":
main(sys.argv)
|
gpl-3.0
|
ua-snap/downscale
|
snap_scripts/epscor_sc/older_epscor_sc_scripts_archive/derived_metrics_epscor_se.py
|
1
|
4556
|
# seasonal calculations
def coordinates( fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False ):
'''
take a raster file as input and return the centroid coords for each
of the grid cells as a pair of numpy 2d arrays (longitude, latitude)
'''
import rasterio
import numpy as np
from affine import Affine
from pyproj import Proj, transform
if fn:
# Read raster
with rasterio.open( fn ) as r:
T0 = r.affine # upper-left pixel corner affine transform
p1 = Proj( r.crs )
A = r.read( 1 ) # pixel values
elif (meta is not None) & (numpy_array is not None):
A = numpy_array
if input_crs != None:
p1 = Proj( input_crs )
T0 = meta[ 'affine' ]
else:
p1 = None
T0 = meta[ 'affine' ]
else:
raise BaseException( 'check inputs' )
# All rows and columns
cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
# Get affine transform for pixel centres
T1 = T0 * Affine.translation( 0.5, 0.5 )
# Function to convert pixel row/column index (from 0) to easting/northing at centre
rc2en = lambda r, c: ( c, r ) * T1
# All eastings and northings (there is probably a faster way to do this)
eastings, northings = np.vectorize(rc2en, otypes=[np.float, np.float])(rows, cols)
if to_latlong == False:
return eastings, northings
elif (to_latlong == True) & (input_crs != None):
# Project all longitudes, latitudes
longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
return longs, lats
else:
raise BaseException( 'cannot reproject to latlong without an input_crs' )
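# Hedged usage sketch (path and CRS are hypothetical): grid-cell centroid
# coordinates in the raster's native CRS, or in lon/lat when input_crs is given.
#   eastings, northings = coordinates( fn='/path/to/raster.tif' )
#   lons, lats = coordinates( fn='/path/to/raster.tif',
#                             input_crs={'init':'epsg:3338'}, to_latlong=True )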
if __name__ == '__main__':
import glob, os, rasterio, itertools
import numpy as np
from pathos import multiprocessing
import pandas as pd
import xarray as xr
# some setup args
base_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5'
variables = [ 'tasmin', 'tasmax' ]
scenarios = [ 'historical', 'rcp26', 'rcp45', 'rcp60', 'rcp85' ]
models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4' ]
for variable, model, scenario in itertools.product( variables, models, scenarios ):
seasons = {'DJF':[12, 1, 2],'MAM':[3, 4, 5],'JJA':[6, 7, 8],'SON':[9, 10, 11]}
# list the files
files = glob.glob( os.path.join( base_dir, model, scenario, variable, '*.tif' ) )
months = [ int(f.split('.')[0].split('_')[-2]) for f in files ]
years = [ int(f.split('.')[0].split('_')[-1]) for f in files ]
df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} )
df = df.sort_values(['year', 'month'])
# some begin end stuff
begin = '-'.join( df.iloc[0].ix[['month','year']].astype( str ) )
end = '-'.join( ['1', (df.iloc[-1].ix['year']+1).astype( str )] )
# get the lons and lats for the NetCDF
lons, lats = coordinates( files[0] )
# lons, lats = [ np.unique( x ) for x in [lons, lats] ]
# make the huge data array
pool = multiprocessing.Pool( 32 )
arr = np.array( pool.map( lambda x: rasterio.open( x ).read( 1 ), files ) )
pool.close()
pool.join()
# create the dataset in xarray
ds = xr.Dataset( { variable:(['time','x','y'], arr) },
coords={ 'lon': (['x', 'y'], lons),
'lat': (['x', 'y'], lats),
'time': pd.date_range( begin, end, freq='M' ) },
attrs={ 'units':'Celsius', 'time_interval':'monthly',
'variable':variable, 'model':model, 'scenario':scenario } )
# group the data by season
grouped = ds.groupby( 'time.season' )
# get the season names
seasons = grouped.groups.keys()
# extract the indices representing these months from the larger xr.Dataset
season = 'DJF'
time_indices = grouped.groups[ season ]
# slice the data using that season
ds_season = ds[ variable ][ time_indices ]
# [ ds[ variable ][ grouped.groups[ season ] ] for season in seasons ]
# # seasonal average ? -- for the series
# ds_season_mean = ds.groupby( 'time.season' ).mean( axis=0 )
# # seasonal min ? -- for the series
# ds_season_min = ds.groupby( 'time.season' ).min( axis=0 )
# # seasonal max ? -- for the series
# ds_season_min = ds.groupby( 'time.season' ).max( axis=0 )
# for season in [ 'DJF', 'MAM', 'JJA', 'SON' ]:
# # select the season / metric here for output to GTiff:
# #
# # write it out
# output_filename = os.path.join( output_path, season+'_mean.tif' )
# with rasterio.open( output_filename, 'w', **meta ) as out:
# out.write( season_arr, 1 )
# # # TEST ZONE
# # seasonal averages by year? -- doesnt make sense
# ds_test_season = ds.groupby( 'time.year' ).apply( lambda x: x.groupby( 'time.season' ).mean( axis=0 ) )
|
mit
|
khkaminska/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
348
|
6232
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, therefore,
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as suggested by Wilson and Hilferty [2].
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
bsd-3-clause
|
idlead/scikit-learn
|
examples/applications/svm_gui.py
|
287
|
11161
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
|
ooovector/qtlab_replacement
|
save_pkl.py
|
1
|
2653
|
import matplotlib.cm as cmap
import datetime
import os
from matplotlib import pyplot as plt
import pickle
from .config import get_config
import numpy as np
from . import plotting
import scipy.io
import pathlib
def default_measurement_save_path(path = None, time=True, root = None, name=None, unfinished=False, mkdir=False):
if (path is None) or (path==''):
(data_root, day_folder_name, time_folder_name) = get_location(unfinished=unfinished)
if root is not None:
data_root = root
if time and name:
path = '{0}/{1}/{2}-{3}'.format(data_root, day_folder_name, time_folder_name, name)
elif time:
path = '{0}/{1}/{2}'.format(data_root, day_folder_name, time_folder_name)
elif name:
path = '{0}/{1}/{2}'.format(data_root, day_folder_name, name)
else:
path = '{0}/{1}'.format(data_root, day_folder_name)
if mkdir:
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
return path
def get_location(unfinished=False):
config = get_config()
if unfinished:
data_root = config['datadir']+'/unfinished'
else:
data_root = config['datadir']
now = datetime.datetime.now()
day_folder_name = now.strftime('%Y-%m-%d')
time_folder_name = now.strftime('%H-%M-%S')
return (data_root, day_folder_name, time_folder_name)
def load_pkl(filename, location=None):
if not location:
(data_root, day_folder_name, time_folder_name) = get_location()
location = '{0}/{1}'.format(data_root, day_folder_name)
print ('{0}/{1}.pkl'.format(location, filename))
f = open('{0}/{1}.pkl'.format(location, filename), "rb")
return pickle.load(f)
def save_pkl(header, data, plot = True, curve_fit=None, annotation=None, location=None, time=True, filename=None, matlab=False, gzip=False, plot_axes=None):
import gzip
location = default_measurement_save_path(path = location, time=time)
if not filename:
		if 'type' in header:
			filename = '{0}'.format(header['type'])
elif 'name' in header:
#type = ' '.join(data.keys())
filename = '{0}'.format(header['name'])
else:
filename = ' '.join(data.keys())
pathlib.Path(location).mkdir(parents=True, exist_ok=True)
with open('{0}/{1}.pkl'.format(location, filename), 'wb') as f:
if header:
data_pkl = (2, data, header)
else:
data_pkl = data
pickle.dump(data_pkl, f)
if plot_axes and plot:
plotting.update_plot_measurement(data, plot_axes)
plotting.plot_add_annotation(plot_axes, annotation)
plotting.plot_save(plot_axes, location)
elif plot:
plotting.plot_measurement(data, filename, save=location, annotation=annotation, subplots=True)
if matlab:
matfilename = '{0}/{1}.mat'.format(location, filename)
scipy.io.savemat(matfilename, mdict=data)
return filename
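# Minimal usage sketch (illustrative only; the header keys and data layout
# below are assumptions of this sketch, not requirements of the module):
def _example_save(x, z):
	header = {'name': 'example-measurement'}
	data = {'x': x, 'z': z}
	# with the defaults this writes <datadir>/<date>/<time>/example-measurement.pkl
	return save_pkl(header, data, plot=False)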
|
gpl-3.0
|
jforbess/pvlib-python
|
pvlib/test/test_atmosphere.py
|
2
|
1579
|
import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
import pandas as pd
from nose.tools import raises
from nose.tools import assert_almost_equals
from pvlib.location import Location
from pvlib import solarposition
from pvlib import atmosphere
# setup times and location to be tested.
times = pd.date_range(start=datetime.datetime(2014,6,24),
end=datetime.datetime(2014,6,26), freq='1Min')
tus = Location(32.2, -111, 'US/Arizona', 700)
times_localized = times.tz_localize(tus.tz)
ephem_data = solarposition.get_solarposition(times, tus)
# need to add physical tests instead of just functional tests
def test_pres2alt():
atmosphere.pres2alt(100000)
def test_alt2press():
    atmosphere.alt2pres(1000)
# two functions combined will generate unique unit tests for each model
def test_airmasses():
models = ['simple', 'kasten1966', 'youngirvine1967', 'kastenyoung1989',
'gueymard1993', 'young1994', 'pickering2002', 'invalid']
for model in models:
yield run_airmass, model, ephem_data['zenith']
def run_airmass(model, zenith):
atmosphere.relativeairmass(zenith, model)
def test_absoluteairmass():
relative_am = atmosphere.relativeairmass(ephem_data['zenith'], 'simple')
atmosphere.absoluteairmass(relative_am)
atmosphere.absoluteairmass(relative_am, pressure=100000)
def test_absoluteairmass_numeric():
atmosphere.absoluteairmass(2)
def test_absoluteairmass_nan():
np.testing.assert_equal(np.nan, atmosphere.absoluteairmass(np.nan))
|
bsd-3-clause
|
rjferrier/fluidity
|
tests/wetting_and_drying_balzano2_cg/plotfs_detec.py
|
5
|
5473
|
#!/usr/bin/env python
import vtktools
import sys
import math
import re
import commands
import matplotlib.pyplot as plt
import getopt
from scipy.special import erf
from numpy import poly1d
from matplotlib.pyplot import figure, show
from numpy import pi, sin, linspace
from matplotlib.mlab import stineman_interp
from numpy import exp, cos
from fluidity_tools import stat_parser
def mirror(x):
return 13800-x
def usage():
print 'Usage:'
print 'plotfs_detec.py [-w] --file=detector_filename --save=filename'
print '--save=... saves the plots as images instead of plotting them on the screen.'
print '-w plots the wetting procedure (drying is default).'
# should be copied from the diamond extrude function. X is 2 dimensional
def bathymetry_function(X):
if X<=3600 or X>6000:
return -X/2760
elif X>3600 and X<=4800:
return -30.0/23
elif X>4800 and X<=6000:
return -X/1380+50.0/23
################# Main ###########################
def main(argv=None):
filename=''
timestep_ana=0.0
dzero=0.01
  save='' # If nonempty, we save the plots as images instead of showing them
wetting=False
try:
opts, args = getopt.getopt(sys.argv[1:], ":w", ['file=','save='])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '--file':
filename=arg
elif opt == '--save':
save=arg
elif opt == '-w':
wetting=True
if filename=='':
print 'No filename specified. You have to give the detectors filename.'
usage()
sys.exit(2)
####################### Print time plot ###########################
print 'Generating time plot'
s = stat_parser(filename)
timesteps=s["ElapsedTime"]["value"]
timestep=timesteps[1]-timesteps[0]
print "Found ", len(timesteps), " timesteps with dt=", timestep
if timestep_ana==0.0:
timestep_ana=timestep
fs=s["water"]["FreeSurface"]
print "Found ", len(fs), " detectors. We assume they are equidistant distributed over the domain (", 0, "-", 13800, ")."
# Get and plot results
  plt.ion() # switch on interactive mode
fig2 = figure()
ax2 = fig2.add_subplot(111)
if wetting:
##plot_start=90 # in timesteps
plot_start=22 # in timesteps, after 18 timesteps the waterlevel reaches its lowest point
##plot_end=114 # in timesteps
plot_end=54 # in timesteps
plot_name='Wetting'
else:
plot_start=54 # in timesteps
plot_end=90 # in timesteps
plot_name='Drying'
for t in range(0,len(timesteps)):
# ignore the first waveperiod
if t<plot_start:
continue
if t>plot_end:
continue
fsvalues=[]
xcoords=[]
for name, item in fs.iteritems():
#print name
xcoords.append(mirror(s[name]['position'][0][0]))
#print xcoord
fsvalues.append(fs[name][t])
# Plot result of one timestep
ax2.plot(xcoords,fsvalues,'r,', label='Numerical solution')
# Plot Analytical solution
fsvalues_ana=[]
offset=-bathymetry_function(0.0)+dzero
xcoords.sort()
for x in xcoords:
fsvalues_ana.append(bathymetry_function(mirror(x))-offset)
    # Plot vertical line in bathymetry on right boundary
xcoords.append(xcoords[len(xcoords)-1]+0.000000001)
fsvalues_ana.append(2.1)
ax2.plot(xcoords, fsvalues_ana, 'k', label='Bathymetry')
#plt.legend()
if t==plot_end:
plt.ylim(-2.2,1.4)
# change from meters in kilometers in the x-axis
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = plt.xticks()
for i in range(0,len(locs)):
labels[i]=str(locs[i]/1000)
plt.xticks(locs, labels)
#plt.title(plot_name)
plt.xlabel('Position [km]')
plt.ylabel('Free surface [m]')
if save=='':
plt.draw()
raw_input("Please press Enter")
else:
plt.savefig(save+'_'+plot_name+'.pdf', facecolor='white', edgecolor='black', dpi=100)
plt.cla()
t=t+1
# Make video from the images:
# mencoder "mf://*.png" -mf type=png:fps=30 -ovc lavc -o output.avi
if __name__ == "__main__":
main()
|
lgpl-2.1
|
andyh616/mne-python
|
examples/datasets/plot_spm_faces_dataset.py
|
17
|
4379
|
# doc:slow-example
"""
==========================================
From raw data to dSPM on SPM Faces dataset
==========================================
Runs a full pipeline using MNE-Python:
- artifact removal
- averaging Epochs
- forward model computation
- source reconstruction using dSPM on the contrast : "faces - scrambled"
Note that this example does quite a bit of processing, so even on a
fast machine it can take about 10 minutes to complete.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import matplotlib.pyplot as plt
import mne
from mne.datasets import spm_face
from mne.preprocessing import ICA, create_eog_epochs
from mne import io
from mne.minimum_norm import make_inverse_operator, apply_inverse
print(__doc__)
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
###############################################################################
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = io.Raw(raw_fname % 1, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 30, method='iir')
events = mne.find_events(raw, stim_channel='UPPT001')
# plot the events to get an idea of the paradigm
mne.viz.plot_events(events, raw.info['sfreq'])
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.6
baseline = None # no baseline as high-pass is applied
reject = dict(mag=5e-12)
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
# Fit ICA, find and remove major artifacts
ica = ICA(n_components=0.95).fit(raw, decim=6, reject=reject)
# compute correlation scores, get bad indices sorted by score
eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject)
eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908')
ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on
ica.plot_components(eog_inds) # view topographic sensitivity of components
ica.exclude += eog_inds[:1]  # we saw the 2nd EOG component looked too dipolar
ica.plot_overlay(eog_epochs.average()) # inspect artifact removal
epochs_cln = ica.apply(epochs, copy=True) # clean data, default in place
evoked = [epochs_cln[k].average() for k in event_ids]
contrast = evoked[1] - evoked[0]
evoked.append(contrast)
for e in evoked:
e.plot(ylim=dict(mag=[-400, 400]))
plt.show()
# estimate noise covariance
noise_cov = mne.compute_covariance(epochs_cln, tmax=0)
###############################################################################
# Visualize fields on MEG helmet
trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
'raw-trans.fif')
maps = mne.make_field_map(evoked[0], trans_fname, subject='spm',
subjects_dir=subjects_dir, n_jobs=1)
evoked[0].plot_field(maps, time=0.170)
###############################################################################
# Compute forward model
# Make source space
src_fname = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
if not op.isfile(src_fname):
src = mne.setup_source_space('spm', src_fname, spacing='oct6',
subjects_dir=subjects_dir, overwrite=True)
else:
src = mne.read_source_spaces(src_fname)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)
forward = mne.convert_forward_solution(forward, surf_ori=True)
###############################################################################
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov,
loose=0.2, depth=0.8)
# Compute inverse solution on contrast
stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None)
# stc.save('spm_%s_dSPM_inverse' % contrast.comment)
# Plot contrast in 3D with PySurfer if available
brain = stc.plot(hemi='both', subjects_dir=subjects_dir)
brain.set_time(170.0) # milliseconds
brain.show_view('ventral')
# brain.save_image('dSPM_map.png')
|
bsd-3-clause
|
lbdreyer/iris
|
docs/iris/gallery_code/meteorology/plot_hovmoller.py
|
4
|
1425
|
"""
Hovmoller Diagram of Monthly Surface Temperature
================================================
This example demonstrates the creation of a Hovmoller diagram with fine control
over plot ticks and labels. The data comes from the Met Office OSTIA project
and has been pre-processed to calculate the monthly mean sea surface
temperature.
"""
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# load a single cube of surface temperature between +/- 5 latitude
fname = iris.sample_data_path("ostia_monthly.nc")
cube = iris.load_cube(
fname,
iris.Constraint("surface_temperature", latitude=lambda v: -5 < v < 5),
)
# Take the mean over latitude
cube = cube.collapsed("latitude", iris.analysis.MEAN)
    # Now that we have our data in a convenient form, let's create the plot
# contour with 20 levels
qplt.contourf(cube, 20)
# Put a custom label on the y axis
plt.ylabel("Time / years")
# Stop matplotlib providing clever axes range padding
plt.axis("tight")
# As we are plotting annual variability, put years as the y ticks
plt.gca().yaxis.set_major_locator(mdates.YearLocator())
# And format the ticks to just show the year
plt.gca().yaxis.set_major_formatter(mdates.DateFormatter("%Y"))
iplt.show()
if __name__ == "__main__":
main()
|
lgpl-3.0
|
voxlol/scikit-learn
|
examples/model_selection/plot_validation_curve.py
|
229
|
1823
|
"""
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
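# Follow-up sketch: the under/overfitting trade-off described in the docstring
# can be read off the curves by taking the gamma with the best cross-validation
# score (printed here for illustration; not part of the plotted figure).
best_idx = np.argmax(test_scores_mean)
print("Best gamma: %.2e (mean CV accuracy %.3f)"
      % (param_range[best_idx], test_scores_mean[best_idx]))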
|
bsd-3-clause
|
mfjb/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
129
|
43401
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very, very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
        # fit one more model with balanced class_weight enabled (re-check)
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
        # ground_truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
bsd-3-clause
|
dhruv13J/scikit-learn
|
sklearn/feature_selection/tests/test_base.py
|
170
|
3666
|
import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
|
bsd-3-clause
|
plissonf/scikit-learn
|
sklearn/tests/test_common.py
|
70
|
7717
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup; this tests all the
# 'configure' functions in the setup.py files of the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
# CV folds and fits a model for each CV iteration before averaging
# the coef. Therefore it is not expected to behave exactly like the
# other linear models.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all non-transformer estimators that have a max_iter
# attribute report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multi-task models related to ENet cannot handle
# mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
|
bsd-3-clause
|
mfjb/scikit-learn
|
sklearn/feature_extraction/hashing.py
|
183
|
6155
|
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
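# Illustrative usage sketch appended here (not part of the original module):
# it exercises the input_type="string" path documented above, where each token
# carries an implicit value of 1. The helper name below is hypothetical.
def _feature_hasher_string_demo():
    hasher = FeatureHasher(n_features=8, input_type="string")
    docs = [["cat", "dog", "dog"], ["bird"]]
    # Each row hashes its tokens into 8 columns; signs may be flipped unless
    # non_negative=True is requested.
    return hasher.transform(docs).toarray()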
|
bsd-3-clause
|
Y-oHr-N/TextCategorization
|
gbssl/laplacian_svm.py
|
2
|
4407
|
import numpy as np
import scipy.sparse as sp
import scipy.linalg as LA
from cvxopt import matrix, solvers
from sklearn.base import BaseEstimator
from sklearn.metrics.pairwise import rbf_kernel
from .base import MRBinaryClassifierMixin
from .multiclass import SemiSupervisedOneVsRestClassifier
class BinaryLapSVC(BaseEstimator, MRBinaryClassifierMixin):
"""Laplacian Support Vectior Machines."""
def fit(self, X, y, L):
"""Fit the model according to the given training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values (unlabeled points are marked as 0).
L : array-like, shape = [n_samples, n_samples]
Graph Laplacian.
"""
labeled = y != 0
y_labeled = y[labeled]
n_samples, n_features = X.shape
n_labeled_samples = y_labeled.size
I = sp.eye(n_samples)
Y = sp.diags(y_labeled)
J = sp.eye(n_labeled_samples, n_samples)
K = rbf_kernel(X, gamma=self.gamma_k)
M = 2 * self.gamma_a * I \
+ 2 * self.gamma_i / n_samples**2 * L**self.p @ K
# Construct the QP, invoke solver
solvers.options['show_progress'] = False
sol = solvers.qp(
P = matrix(Y @ J @ K @ LA.inv(M) @ J.T @ Y),
q = matrix(-1 * np.ones(n_labeled_samples)),
G = matrix(np.vstack((
-1 * np.eye(n_labeled_samples),
n_labeled_samples * np.eye(n_labeled_samples)
))),
h = matrix(np.hstack((
np.zeros(n_labeled_samples),
np.ones(n_labeled_samples)
))),
A = matrix(y_labeled, (1, n_labeled_samples), 'd'),
b = matrix(0.0)
)
# Train a classifier
self.dual_coef_ = LA.solve(M, J.T @ Y @ np.array(sol['x']).ravel())
return self
class LapSVC(SemiSupervisedOneVsRestClassifier):
"""Laplacian Support Vectior Machines.
Parameters
----------
gamma_a : float
Regularization parameter.
gamma_i : float
Smoothness regularization parameter.
gamma_k : float
Kernel coefficient.
sparsify : {'kNN', 'MkNN', 'epsilonNN'}
Graph sparsification type.
n_neighbors : int > 0
Number of neighbors for each sample.
radius : float
Radius of neighborhoods.
reweight : {'rbf', 'binary'}
Edge re-weighting type.
t : float
Kernel coefficient.
normed : boolean, default True
If True, then compute normalized Laplacian.
p : integer > 0
Degree of the graph Laplacian.
Attributes
----------
X_ : array-like, shape = [n_samples, n_features]
Training data.
y_ : array-like, shape = [n_samples]
Target values.
classes_ : array-like, shape = [n_classes]
Class labels.
A_ : array-like, shape = [n_samples, n_samples]
Adjacency matrix.
estimators_ : list of n_classes estimators
Estimators used for predictions.
label_binarizer_ : LabelBinarizer object
Object used to transform multiclass labels to binary labels and vice-versa.
References
----------
Mikhail Belkin, Partha Niyogi, Vikas Sindhwani,
"On Manifold Regularization",
AISTATS, 2005.
"""
def __init__(
self, gamma_a = 1.0, gamma_i = 1.0, gamma_k = 1.0,
sparsify = 'kNN', n_neighbors = 10, radius = 1.0, reweight = 'rbf',
t = None, normed = True, p = 1
):
super(LapSVC, self).__init__(
estimator = BinaryLapSVC(), sparsify = sparsify,
n_neighbors = n_neighbors, radius = radius,
reweight = reweight, t = t,
normed = normed
)
self.params = {
'gamma_a': gamma_a, 'gamma_i': gamma_i, 'gamma_k': gamma_k, 'p': p
}
self.estimator.set_params(**self.params)
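# Rough usage sketch (added; not part of the original module). It assumes the
# multiclass wrapper follows the convention in BinaryLapSVC.fit, i.e. that
# unlabeled samples are marked with y == 0 and that the graph Laplacian is
# built internally from the sparsify/reweight options; the exact fit signature
# of SemiSupervisedOneVsRestClassifier is not shown in this file.
#
#     X = np.random.randn(100, 5)
#     y = np.zeros(100, dtype=int)     # 0 marks unlabeled points
#     y[:10] = 1 + (X[:10, 0] > 0)     # a few labeled points (classes 1 and 2)
#     clf = LapSVC(gamma_a=1.0, gamma_i=1.0, gamma_k=1.0, n_neighbors=10)
#     clf.fit(X, y)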
|
mit
|
ctorney/statesFromSpace
|
processdata/calcValues.py
|
1
|
13257
|
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import math
from math import *
from geographiclib.geodesic import Geodesic
posfilename = '../data/nodes_meters.csv'
outfilename = '../data/node_values.csv'
posDF = pd.read_csv(posfilename)
columns = ['id', 'x', 'y', 'angle','dist_nn','angle_nn','align_nn']
df = pd.DataFrame(columns=columns)
xvals=posDF['xm'].values
yvals=posDF['ym'].values
kdindex = posDF[posDF['Name']!='Known_direction'].index.values
count=0
for i,pos in posDF.iterrows():
if pos['Name']!='Known_direction':
continue
# find nearest neighbour
thisX=pos['xm']
thisY=pos['ym']
thisAngle=pos['angle']
distances=(thisX-xvals)**2+(thisY-yvals)**2
distances[i]=math.nan
closest = np.nanargmin(distances)
# find nearest neighbour with heading
if posDF['Name'][closest]=='Known_direction':
closestKD = closest
else:
distances[kdindex]=math.nan
closestKD = np.nanargmin(distances)
# calculate distance and angle to nearest neighbour
diff = Geodesic.WGS84.Inverse(pos['Y'],pos['X'],posDF['Y'][closest],posDF['X'][closest])
dist_nn = diff['s12']
angle_nn = abs(math.cos(math.radians(90-diff['azi1']) - pos['angle'])) # lat lon goes clockwise from north
align_nn= abs(math.cos(pos['angle']-posDF['angle'][closestKD]))
df.loc[len(df)] = [count,thisX,thisY,thisAngle,dist_nn,angle_nn,align_nn]
count = count + 1
df.to_csv(outfilename,index=False)
#
#rowCount = int(len(posDF))
#
## convert to a numpy array
#allData = np.empty((rowCount,4))
#i = 0
#for _ , pos in posDF.groupby('FID'):
# allData[i,0] = 0.5*(pos['xm'].iloc[0]+pos['xm'].iloc[1])
# allData[i,1] = 0.5*(pos['ym'].iloc[0]+pos['ym'].iloc[1])
# allData[i,2] = math.atan2(pos['ym'].iloc[0]-pos['ym'].iloc[1],pos['xm'].iloc[0]-pos['xm'].iloc[1])
# allData[i,3] = pos['FID'].iloc[0]
# i = i + 1
# # we don't know heads from tails so add an entry for each direction
# allData[i,0] = 0.5*(pos['xm'].iloc[0]+pos['xm'].iloc[1])
# allData[i,1] = 0.5*(pos['ym'].iloc[0]+pos['ym'].iloc[1])
# allData[i,2] = math.atan2(pos['ym'].iloc[1]-pos['ym'].iloc[0],pos['xm'].iloc[1]-pos['xm'].iloc[0])
# allData[i,3] = pos['FID'].iloc[0]
# i = i + 1
#
###checks
##s1=10
##s2=20
##plt.figure()
##plt.plot(posDF['XCOORD'].values[s1:s2],posDF['YCOORD'].values[s1:s2],'.')
##plt.figure()
##plt.quiver(allData[s1:s2,0],allData[s1:s2,1], np.cos(allData[s1:s2,2]), np.sin(allData[s1:s2,2]))
#
#
#
## build an array to store the relative angles and distances to all neighbours
#locations = np.zeros((0,3)).astype(np.float32)
#for thisRow in range(rowCount):
# thisX = allData[thisRow,0]
# thisY = allData[thisRow,1]
# thisAngle = (allData[thisRow,2])
# thisTrack = (allData[thisRow,3])
#
# # find all animals at this time point in the clip that aren't the focal individual
# window = allData[(allData[:,3]!=thisTrack),:]
# rowLoc = np.zeros((0,3)).astype(np.float32)
# for w in window:
# xj = w[0]
# yj = w[1]
# jAngle = (w[2])
# r = ((((thisX-xj)**2+(thisY-yj)**2))**0.5)
# if r>100:
# continue
# dx = xj - thisX
# dy = yj - thisY
# angle = math.atan2(dy,dx)
# angle = angle - thisAngle
# jAngle = jAngle - thisAngle
# #angle = math.atan2(dy,dx)
# theta = math.atan2(math.sin(angle), math.cos(angle))
# jHeading = math.atan2(math.sin(jAngle), math.cos(jAngle))
# rowLoc = np.vstack((rowLoc,[r, theta, jHeading]))
# locations = np.vstack((locations,rowLoc))
#
#
### POLAR PLOT OF RELATIVE POSITIONS
##BL = is approx 32 pixels
#binn2=19 # distance bins
#binn1=36
#
#dr = 0.5 # width of distance bins
#sr = 0.25 # start point of distance
#maxr=sr+(dr*binn2)
#theta2 = np.linspace(0.0,2.0 * np.pi, binn1+1)
#r2 = np.linspace(sr, maxr, binn2+1)
#areas = pi*((r2+dr)**2-r2**2)/binn1
#areas = areas[0:-1]
#areas=np.tile(areas,(binn1,1)).T
#
## wrap to [0, 2pi]
#locations[locations[:,1]<0,1] = locations[locations[:,1]<0,1] + 2 *pi
#
#hista2=np.histogram2d(x=locations[:,0],y=locations[:,1],bins=[r2,theta2],normed=1)[0]
#
#hista2 =hista2/areas
#
#size = 8
## make a square figure
#
#fig1=plt.figure(figsize=(8,8))
#ax2=plt.subplot(projection="polar",frameon=False)
#im=ax2.pcolormesh(theta2,r2,hista2,lw=0.0,vmin=0,vmax=0.15,cmap='viridis')
##im=ax2.pcolormesh(theta2,r2,hista2,lw=0.0,vmin=0.0005,vmax=0.002,cmap='viridis')
#ax2.yaxis.set_visible(False)
#
## angle lines
#ax2.set_thetagrids(angles=np.arange(0,360,45),labels=['', '45°', '90°', '135°', '', '225°','270°', '315°'],frac=1.1)
#ax1 = ax2.figure.add_axes(ax2.get_position(), projection='polar',label='twin', frame_on=False,theta_direction=ax2.get_theta_direction(), theta_offset=ax2.get_theta_offset())
#ax1.yaxis.set_visible(False)
#ax1.set_thetagrids(angles=np.arange(0,360,45),labels=['front', '', '', '', 'back', '','', ''],frac=1.1)
##colourbar
#position=fig1.add_axes([1.1,0.12,0.04,0.8])
#cbar=plt.colorbar(im,cax=position)
#cbar.set_label('Neighbour density', rotation=90,fontsize='xx-large',labelpad=15)
#
##body length legend - draws the ticks and
#axes=ax2
#factor = 0.98
#d = axes.get_yticks()[-1] #* factor
#r_tick_labels = [0] + axes.get_yticks()
#r_ticks = (np.array(r_tick_labels) ** 2 + d ** 2) ** 0.5
#theta_ticks = np.arcsin(d / r_ticks) + np.pi / 2
#r_axlabel = (np.mean(r_tick_labels) ** 2 + d ** 2) ** 0.5
#theta_axlabel = np.arcsin(d / r_axlabel) + np.pi / 2
#
## fixed offsets in x
#offset_spine = transforms.ScaledTranslation(-100, 0, axes.transScale)
#offset_ticklabels = transforms.ScaledTranslation(-10, 0, axes.transScale)
#offset_axlabel = transforms.ScaledTranslation(-40, 0, axes.transScale)
#
## apply these to the data coordinates of the line/ticks
#trans_spine = axes.transData + offset_spine
#trans_ticklabels = trans_spine + offset_ticklabels
#trans_axlabel = trans_spine + offset_axlabel
#axes.plot(theta_ticks, r_ticks, '-_k', transform=trans_spine, clip_on=False)
#
## plot the 'tick labels'
#for ii in range(len(r_ticks)):
# axes.text(theta_ticks[ii], r_ticks[ii], "%.0f" % r_tick_labels[ii], ha="right", va="center", clip_on=False, transform=trans_ticklabels)
#
## plot the 'axis label'
#axes.text(theta_axlabel, r_axlabel, 'distance (meters)',rotation='vertical', fontsize='xx-large', ha='right', va='center', clip_on=False, transform=trans_axlabel)# family='Trebuchet MS')
#
#
#fig1.savefig(posfilename + ".png",bbox_inches='tight',dpi=100)
## plot the 'spine'
#plt.figure()
#binn2=39 # distance bins
#
#
#dr = 0.5 # width of distance bins
#sr = 0.25 # start point of distance
#maxr=sr+(dr*binn2)
#
#r2 = np.linspace(sr, maxr, binn2+1)
#areas = pi*((r2+dr)**2-r2**2)
#areas = areas[0:-1]
#
#
#
#hista1=np.histogram(locations[:,0],bins=r2,normed=1)[0]
#
#hista1 =hista1/areas
#plt.plot(r2[:-1]+0.5*dr,hista1,'.-')
#plt.ylim([0,0.01])
#plt.savefig(posfilename + "_dist.png",bbox_inches='tight',dpi=100)
#
##
##
#### POLAR PLOT OF ALIGNMENT
##cosRelativeAngles = np.cos(locations[:,2])
##sinRelativeAngles = np.sin(locations[:,2])
##
### find the average cos and sin of the relative headings to calculate circular statistics
##histcos=binned_statistic_2d(x=locations[:,0],y=locations[:,1],values=cosRelativeAngles, statistic='mean', bins=[r2,theta2])[0]
##histsin=binned_statistic_2d(x=locations[:,0],y=locations[:,1],values=sinRelativeAngles, statistic='mean', bins=[r2,theta2])[0]
##
### mean is atan and std dev is 1-R
##relativeAngles = np.arctan2(histsin,histcos)
##stdRelativeAngles = np.sqrt( 1 - np.sqrt(histcos**2+histsin**2))
##minSD = np.nanmin(stdRelativeAngles)
##maxSD = np.nanmax(stdRelativeAngles)
##
##stdRelativeAngles[np.isnan(stdRelativeAngles)]=0
##
##
##fig1=plt.figure(figsize=(8,8))
##ax2=plt.subplot(projection="polar",frameon=False)
##im=ax2.pcolormesh(theta2,r2,stdRelativeAngles,lw=0.0,vmin=minSD,vmax=maxSD,cmap='viridis_r')
##ax2.yaxis.set_visible(False)
##
### angle lines
##ax2.set_thetagrids(angles=np.arange(0,360,45),labels=['', '45°', '90°', '135°', '', '225°','270°', '315°'],frac=1.1)
##ax1 = ax2.figure.add_axes(ax2.get_position(), projection='polar',label='twin', frame_on=False,theta_direction=ax2.get_theta_direction(), theta_offset=ax2.get_theta_offset())
##ax1.yaxis.set_visible(False)
##ax1.set_thetagrids(angles=np.arange(0,360,45),labels=['front', '', '', '', 'back', '','', ''],frac=1.1)
###colourbar
##position=fig1.add_axes([1.1,0.12,0.04,0.8])
##cbar=plt.colorbar(im,cax=position)
##cbar.set_label('Circular variance', rotation=90,fontsize='xx-large',labelpad=15)
##
###body length legend - draws the ticks and
##axes=ax2
##factor = 0.98
##d = axes.get_yticks()[-1] #* factor
##r_tick_labels = [0] + axes.get_yticks()
##r_ticks = (np.array(r_tick_labels) ** 2 + d ** 2) ** 0.5
##theta_ticks = np.arcsin(d / r_ticks) + np.pi / 2
##r_axlabel = (np.mean(r_tick_labels) ** 2 + d ** 2) ** 0.5
##theta_axlabel = np.arcsin(d / r_axlabel) + np.pi / 2
##
### fixed offsets in x
##offset_spine = transforms.ScaledTranslation(-100, 0, axes.transScale)
##offset_ticklabels = transforms.ScaledTranslation(-10, 0, axes.transScale)
##offset_axlabel = transforms.ScaledTranslation(-40, 0, axes.transScale)
##
### apply these to the data coordinates of the line/ticks
##trans_spine = axes.transData + offset_spine
##trans_ticklabels = trans_spine + offset_ticklabels
##trans_axlabel = trans_spine + offset_axlabel
##axes.plot(theta_ticks, r_ticks, '-_k', transform=trans_spine, clip_on=False)
##
### plot the 'tick labels'
##for ii in range(len(r_ticks)):
## axes.text(theta_ticks[ii], r_ticks[ii], "%.0f" % r_tick_labels[ii], ha="right", va="center", clip_on=False, transform=trans_ticklabels)
##
### plot the 'axis label'
##axes.text(theta_axlabel, r_axlabel, 'metres',rotation='vertical', fontsize='xx-large', ha='right', va='center', clip_on=False, transform=trans_axlabel)# family='Trebuchet MS')
##
##
##fig1.savefig("order.png",bbox_inches='tight',dpi=100)
##
##
#### POLAR PLOT OF ATTRACTION
##
##
### find the average cos and sin of the relative headings to calculate circular statistics
##histcos=binned_statistic_2d(x=locations[:,0],y=locations[:,1],values=cosRelativeAngles, statistic='mean', bins=[r2,theta2])[0]
##histsin=binned_statistic_2d(x=locations[:,0],y=locations[:,1],values=sinRelativeAngles, statistic='mean', bins=[r2,theta2])[0]
##
##
##angles = 0.5*(theta2[0:-1]+theta2[1:])
##angles=np.tile(angles,(binn2,1))
##
##toOrigin = -(histcos*np.cos(angles) + histsin*np.sin(angles))
##fig1=plt.figure(figsize=(8,8))
##ax2=plt.subplot(projection="polar",frameon=False)
##im=ax2.pcolormesh(theta2,r2,toOrigin,lw=0.0,vmin=np.nanmin(toOrigin),vmax=np.nanmax(toOrigin),cmap='viridis')
##ax2.yaxis.set_visible(False)
##
### angle lines
##ax2.set_thetagrids(angles=np.arange(0,360,45),labels=['', '45°', '90°', '135°', '', '225°','270°', '315°'],frac=1.1)
##ax1 = ax2.figure.add_axes(ax2.get_position(), projection='polar',label='twin', frame_on=False,theta_direction=ax2.get_theta_direction(), theta_offset=ax2.get_theta_offset())
##ax1.yaxis.set_visible(False)
##ax1.set_thetagrids(angles=np.arange(0,360,45),labels=['front', '', '', '', 'back', '','', ''],frac=1.1)
###colourbar
##position=fig1.add_axes([1.1,0.12,0.04,0.8])
##cbar=plt.colorbar(im,cax=position)
##cbar.set_label('Attraction', rotation=90,fontsize='xx-large',labelpad=15)
##
###body length legend - draws the ticks and
##axes=ax2
##factor = 0.98
##d = axes.get_yticks()[-1] #* factor
##r_tick_labels = [0] + axes.get_yticks()
##r_ticks = (np.array(r_tick_labels) ** 2 + d ** 2) ** 0.5
##theta_ticks = np.arcsin(d / r_ticks) + np.pi / 2
##r_axlabel = (np.mean(r_tick_labels) ** 2 + d ** 2) ** 0.5
##theta_axlabel = np.arcsin(d / r_axlabel) + np.pi / 2
##
### fixed offsets in x
##offset_spine = transforms.ScaledTranslation(-100, 0, axes.transScale)
##offset_ticklabels = transforms.ScaledTranslation(-10, 0, axes.transScale)
##offset_axlabel = transforms.ScaledTranslation(-40, 0, axes.transScale)
##
### apply these to the data coordinates of the line/ticks
##trans_spine = axes.transData + offset_spine
##trans_ticklabels = trans_spine + offset_ticklabels
##trans_axlabel = trans_spine + offset_axlabel
##axes.plot(theta_ticks, r_ticks, '-_k', transform=trans_spine, clip_on=False)
##
### plot the 'tick labels'
##for ii in range(len(r_ticks)):
## axes.text(theta_ticks[ii], r_ticks[ii], "%.0f" % r_tick_labels[ii], ha="right", va="center", clip_on=False, transform=trans_ticklabels)
##
### plot the 'axis label'
##axes.text(theta_axlabel, r_axlabel, 'metres',rotation='vertical', fontsize='xx-large', ha='right', va='center', clip_on=False, transform=trans_axlabel)# family='Trebuchet MS')
##
##
##fig1.savefig("toOrigin.png",bbox_inches='tight',dpi=100)
##
|
mit
|
psychopy/versions
|
psychopy/visual/helpers.py
|
1
|
16419
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper functions shared by the visual classes
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import absolute_import, division, print_function
from past.builtins import basestring
from builtins import range
import os
import copy
from pkg_resources import parse_version
from psychopy import logging, colors
# tools must only be imported *after* event or MovieStim breaks on win32
# (JWP has no idea why!)
from psychopy.tools.arraytools import val2array
from psychopy.tools.attributetools import setAttribute
from psychopy.tools.filetools import pathToString
import numpy as np
reportNImageResizes = 5 # stop raising warning after this
# global _nImageResizes
_nImageResizes = 0
try:
import matplotlib
if parse_version(matplotlib.__version__) > parse_version('1.2'):
from matplotlib.path import Path as mplPath
else:
from matplotlib import nxutils
haveMatplotlib = True
except Exception:
haveMatplotlib = False
def pointInPolygon(x, y, poly):
"""Determine if a point is inside a polygon; returns True if inside.
(`x`, `y`) is the point to test. `poly` is a list of 3 or more vertices
as (x,y) pairs. If given an object, such as a `ShapeStim`, will try to
use its vertices and position as the polygon.
Same as the `.contains()` method elsewhere.
"""
try: # do this using try:...except rather than hasattr() for speed
poly = poly.verticesPix # we want to access this only once
except Exception:
pass
nVert = len(poly)
if nVert < 3:
msg = 'pointInPolygon expects a polygon with 3 or more vertices'
logging.warning(msg)
return False
# faster if have matplotlib tools:
if haveMatplotlib:
if parse_version(matplotlib.__version__) > parse_version('1.2'):
return mplPath(poly).contains_point([x, y])
else:
try:
return bool(nxutils.pnpoly(x, y, poly))
except Exception:
pass
# fall through to pure python:
# adapted from http://local.wasp.uwa.edu.au/~pbourke/geometry/insidepoly/
# via http://www.ariel.com.au/a/python-point-int-poly.html
inside = False
# trace (horizontal?) rays, flip inside status if cross an edge:
p1x, p1y = poly[-1]
for p2x, p2y in poly:
if y > min(p1y, p2y) and y <= max(p1y, p2y) and x <= max(p1x, p2x):
if p1y != p2y:
xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x, p1y = p2x, p2y
return inside
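# Small illustrative check (added; not part of the original module) for the
# documented behaviour of pointInPolygon on a plain list of vertices; the
# helper name is hypothetical.
def _pointInPolygonDemo():
    square = [(0, 0), (1, 0), (1, 1), (0, 1)]
    assert pointInPolygon(0.5, 0.5, square)       # centre of the square
    assert not pointInPolygon(2.0, 0.5, square)   # clearly outside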
def polygonsOverlap(poly1, poly2):
"""Determine if two polygons intersect; can fail for very pointy polygons.
Accepts two polygons, as lists of vertices (x,y) pairs. If given an object
with (vertices + pos), will try to use that as the polygon.
Checks if any vertex of one polygon is inside the other polygon. Same as
the `.overlaps()` method elsewhere.
:Notes:
We implement special handling for the `Line` stimulus as it is not a
proper polygon.
We do not check for class instances because this would require importing of
`visual.Line`, creating a circular import. Instead, we assume that a
"polygon" with only two vertices is meant to specify a line. Pixels between
the endpoints get interpolated before testing for overlap.
"""
try: # do this using try:...except rather than hasattr() for speed
if poly1.verticesPix.shape == (2, 2): # Line
# Interpolate pixels.
x = np.arange(poly1.verticesPix[0, 0],
poly1.verticesPix[1, 0] + 1)
y = np.arange(poly1.verticesPix[0, 1],
poly1.verticesPix[1, 1] + 1)
poly1_vert_pix = np.column_stack((x,y))
else:
poly1_vert_pix = poly1.verticesPix
except AttributeError:
poly1_vert_pix = poly1
try: # do this using try:...except rather than hasattr() for speed
if poly2.verticesPix.shape == (2, 2): # Line
# Interpolate pixels.
x = np.arange(poly2.verticesPix[0, 0],
poly2.verticesPix[1, 0] + 1)
y = np.arange(poly2.verticesPix[0, 1],
poly2.verticesPix[1, 1] + 1)
poly2_vert_pix = np.column_stack((x,y))
else:
poly2_vert_pix = poly2.verticesPix
except AttributeError:
poly2_vert_pix = poly2
# faster if have matplotlib tools:
if haveMatplotlib:
if parse_version(matplotlib.__version__) > parse_version('1.2'):
if any(mplPath(poly1_vert_pix).contains_points(poly2_vert_pix)):
return True
return any(mplPath(poly2_vert_pix).contains_points(poly1_vert_pix))
else:
try: # deprecated in matplotlib 1.2
if any(nxutils.points_inside_poly(poly1_vert_pix,
poly2_vert_pix)):
return True
return any(nxutils.points_inside_poly(poly2_vert_pix,
poly1_vert_pix))
except Exception:
pass
# fall through to pure python:
for p1 in poly1_vert_pix:
if pointInPolygon(p1[0], p1[1], poly2_vert_pix):
return True
for p2 in poly2_vert_pix:
if pointInPolygon(p2[0], p2[1], poly1_vert_pix):
return True
return False
def setTexIfNoShaders(obj):
"""Useful decorator for classes that need to update Texture after
other properties. This doesn't actually perform the update, but sets
a flag so the update occurs at draw time (in case multiple changes all
need updates only do it once).
"""
if hasattr(obj, 'useShaders') and not obj.useShaders:
# we aren't using shaders
if hasattr(obj, '_needTextureUpdate'):
obj._needTextureUpdate = True
def setColor(obj, color, colorSpace=None, operation='',
rgbAttrib='rgb', # or 'fillRGB' etc
colorAttrib='color', # or 'fillColor' etc
colorSpaceAttrib=None, # e.g. 'colorSpace' or 'fillColorSpace'
log=True):
"""Provides the workings needed by setColor, and can perform this for
any arbitrary color type (e.g. fillColor,lineColor etc).
OBS: log argument is deprecated - has no effect now.
Logging should be done when setColor() is called.
"""
# how this works:
# rather than using obj.rgb=rgb this function uses setattr(obj,'rgb',rgb)
# color represents the color in the native space
# colorAttrib is the name that color will be assigned using
# setattr(obj,colorAttrib,color)
# rgb is calculated from converting color
# rgbAttrib is the attribute name that rgb is stored under,
# e.g. lineRGB for obj.lineRGB
# colorSpace and takes name from colorAttrib+space e.g.
# obj.lineRGBSpace=colorSpace
if colorSpaceAttrib is None:
colorSpaceAttrib = colorAttrib + 'Space'
# Handle strings and returns immediately as operations, colorspace etc.
# does not apply here.
if isinstance(color, basestring):
if operation not in ('', None):
raise TypeError('Cannot do operations on named or hex color')
if color.lower() in colors.colors255:
# set rgb, color and colorSpace
setattr(obj, rgbAttrib,
np.array(colors.colors255[color.lower()], float))
obj.__dict__[colorSpaceAttrib] = 'named'  # e.g. obj.colorSpace='named'
obj.__dict__[colorAttrib] = color # e.g. obj.color='red'
setTexIfNoShaders(obj)
return
elif color[0] == '#' or color[0:2] == '0x':
# e.g. obj.rgb=[0,0,0]
setattr(obj, rgbAttrib, np.array(colors.hex2rgb255(color)))
obj.__dict__[colorSpaceAttrib] = 'hex' # eg obj.colorSpace='hex'
obj.__dict__[colorAttrib] = color  # e.g. obj.color='#000000'
setTexIfNoShaders(obj)
return
else:
# we got a string, but it isn't in the list of named colors and
# doesn't work as a hex
raise AttributeError(
"PsychoPy can't interpret the color string '%s'" % color)
else:
# If it wasn't a string, do check and conversion of scalars,
# sequences and other stuff.
color = val2array(color, length=3) # enforces length 1 or 3
if color is None:
setattr(obj, rgbAttrib, None) # e.g. obj.rgb=[0,0,0]
obj.__dict__[colorSpaceAttrib] = None # e.g. obj.colorSpace='hex'
obj.__dict__[colorAttrib] = None # e.g. obj.color='#000000'
setTexIfNoShaders(obj)
# at this point we have a numpy array of 3 vals
# check if colorSpace is given and use obj.colorSpace if not
if colorSpace is None:
colorSpace = getattr(obj, colorSpaceAttrib)
# using previous color space - if we got this far in the
# setColor function then we haven't been given a color name -
# we don't know what color space to use.
if colorSpace in ('named', 'hex'):
logging.error("If you setColor with a numeric color value then"
" you need to specify a color space, e.g. "
"setColor([1,1,-1],'rgb'), unless you used a "
"numeric value previously in which case PsychoPy "
"will reuse that color space.)")
return
# check whether combining sensible colorSpaces (e.g. can't add things to
# hex or named colors)
if operation != '' and getattr(obj, colorSpaceAttrib) in ['named', 'hex']:
msg = ("setColor() cannot combine ('%s') colors "
"within 'named' or 'hex' color spaces")
raise AttributeError(msg % operation)
elif operation != '' and colorSpace != getattr(obj, colorSpaceAttrib):
msg = ("setColor cannot combine ('%s') colors"
" from different colorSpaces (%s,%s)")
raise AttributeError(msg % (operation, obj.colorSpace, colorSpace))
else: # OK to update current color
if colorSpace == 'named':
# operations don't make sense for named
obj.__dict__[colorAttrib] = color
else:
setAttribute(obj, colorAttrib, color, log=False,
operation=operation, stealth=True)
# get window (for color conversions)
if colorSpace in ['dkl', 'lms']: # only needed for these spaces
if hasattr(obj, 'dkl_rgb'):
win = obj # obj is probably a Window
elif hasattr(obj, 'win'):
win = obj.win # obj is probably a Stimulus
else:
win = None
logging.error("_setColor() is being applied to something"
" that has no known Window object")
# convert new obj.color to rgb space
newColor = getattr(obj, colorAttrib)
if colorSpace in ['rgb', 'rgb255', 'named']:
setattr(obj, rgbAttrib, newColor)
elif colorSpace == 'dkl':
if (win.dkl_rgb is None or
np.all(win.dkl_rgb == np.ones([3, 3]))):
dkl_rgb = None
else:
dkl_rgb = win.dkl_rgb
setattr(obj, rgbAttrib, colors.dkl2rgb(
np.asarray(newColor).transpose(), dkl_rgb))
elif colorSpace == 'lms':
if (win.lms_rgb is None or
np.all(win.lms_rgb == np.ones([3, 3]))):
lms_rgb = None
elif win.monitor.getPsychopyVersion() < '1.76.00':
logging.error("The LMS calibration for this monitor was carried"
" out before version 1.76.00."
" We would STRONGLY recommend that you repeat the "
"color calibration before using this color space "
"(contact Jon for further info).")
lms_rgb = win.lms_rgb
else:
lms_rgb = win.lms_rgb
setattr(obj, rgbAttrib, colors.lms2rgb(newColor, lms_rgb))
elif colorSpace == 'hsv':
setattr(obj, rgbAttrib, colors.hsv2rgb(np.asarray(newColor)))
elif colorSpace is None:
pass # probably using named colors?
else:
logging.error('Unknown colorSpace: %s' % colorSpace)
# store name of colorSpace for future ref and for drawing
obj.__dict__[colorSpaceAttrib] = colorSpace
# if needed, set the texture too
setTexIfNoShaders(obj)
# set for groupFlipVert:
immutables = {int, float, str, tuple, int, bool,
np.float64, np.float, np.int, np.long}
def findImageFile(filename):
"""Tests whether the filename is an image file. If not will try some common
alternatives (e.g. extensions .jpg .tif...)
"""
# if the user supplied a correct path then return quickly
filename = pathToString(filename)
isfile = os.path.isfile
if isfile(filename):
return filename
orig = copy.copy(filename)
# search for file using additional extensions
extensions = ('.jpg', '.png', '.tif', '.bmp', '.gif', '.jpeg', '.tiff')
# not supported: 'svg', 'eps'
def logCorrected(orig, actual):
logging.warn("Requested image {!r} not found but similar filename "
"{!r} exists. This will be used instead but changing the "
"filename is advised.".format(orig, actual))
# it already has one but maybe it's wrong? Remove it
if filename.endswith(extensions):
filename = os.path.splitext(orig)[0]
if isfile(filename):
# had an extension but didn't need one (mac?)
logCorrected(orig, filename)
return filename
# try adding the standard set of extensions
for ext in extensions:
if isfile(filename+ext):
filename += ext
logCorrected(orig, filename)
return filename
def groupFlipVert(flipList, yReflect=0):
"""Reverses the vertical mirroring of all items in list ``flipList``.
Reverses the .flipVert status, vertical (y) positions, and angular
rotation (.ori). Flipping preserves the relations among the group's
visual elements. The parameter ``yReflect`` is the y-value of an
imaginary horizontal line around which to reflect the items;
default = 0 (screen center).
Typical usage is to call once prior to any display; call again to un-flip.
Can be called with a list of all stim to be presented in a given routine.
Will flip a) all psychopy.visual.xyzStim that have a setFlipVert method,
b) the y values of .vertices, and c) items in n x 2 lists that are mutable
(i.e., list, np.array, no tuples): [[x1, y1], [x2, y2], ...]
"""
if type(flipList) != list:
flipList = [flipList]
for item in flipList:
if type(item) in (list, np.ndarray):
if type(item[0]) in (list, np.ndarray) and len(item[0]) == 2:
for i in range(len(item)):
item[i][1] = 2 * yReflect - item[i][1]
else:
msg = 'Cannot vert-flip elements in "{}", type={}'
raise ValueError(msg.format(item, type(item[0])))
elif type(item) in immutables:
raise ValueError('Cannot change immutable item "{}"'.format(item))
if hasattr(item, 'setPos'):
item.setPos([1, -1], '*')
item.setPos([0, 2 * yReflect], '+')
elif hasattr(item, 'pos'): # only if no setPos available
item.pos[1] *= -1
item.pos[1] += 2 * yReflect
if hasattr(item, 'setFlipVert'): # eg TextStim, custom marker
item.setFlipVert(not item.flipVert)
elif hasattr(item, 'vertices'): # and lacks a setFlipVert method
try:
v = item.vertices * [1, -1] # np.array
except Exception:
v = [[item.vertices[i][0], -1 * item.vertices[i][1]]
for i in range(len(item.vertices))]
item.setVertices(v)
if hasattr(item, 'setOri') and item.ori:
# non-zero orientation angle
item.setOri(-1, '*')
item._needVertexUpdate = True
|
gpl-3.0
|
rubikloud/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
294
|
1273
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
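# Added note (not part of the original example): the distance between the two
# dashed margin lines w.x + b = +1 and w.x + b = -1 is 2 / ||w||.
print("margin width: %.3f" % (2 / np.linalg.norm(w)))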
|
bsd-3-clause
|
initNirvana/Easyphotos
|
env/lib/python3.4/site-packages/IPython/core/display.py
|
4
|
33202
|
# -*- coding: utf-8 -*-
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import json
import mimetypes
import os
import struct
import warnings
from IPython.core.formatters import _safe_get_formatter_method
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
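# Tiny illustrative example (added; not part of the original module): sub-dicts
# are merged key-by-key instead of being replaced wholesale. The helper name is
# hypothetical.
def _merge_example():
    merged = _merge({'image/png': {'width': 10}}, {'image/png': {'height': 5}})
    assert merged == {'image/png': {'width': 10, 'height': 5}}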
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
"""internal implementation of all display_foo methods
Parameters
----------
mimetype : str
The mimetype to be published (e.g. 'image/png')
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
if metadata:
metadata = {mimetype: metadata}
if raw:
# turn list of pngdata into list of { 'image/png': pngdata }
objs = [ {mimetype: obj} for obj in objs ]
display(*objs, raw=raw, metadata=metadata, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
"""Publish data and metadata to all frontends.
See the ``display_data`` message in the messaging documentation for
more details about this message type.
The following MIME types are currently implemented:
* text/plain
* text/html
* text/markdown
* text/latex
* application/json
* application/javascript
* image/png
* image/jpeg
* image/svg+xml
Parameters
----------
data : dict
A dictionary having keys that are valid MIME types (like
'text/plain' or 'image/svg+xml') and values that are the data for
that MIME type. The data itself must be a JSON'able data
structure. Minimally all data should have the 'text/plain' data,
which can be displayed by all frontends. If more than the plain
text is given, it is up to the frontend to decide which
representation to use.
metadata : dict
A dictionary for metadata related to the data. This can contain
arbitrary key, value pairs that frontends can use to interpret
the data. mime-type keys matching those in data can be used
to specify metadata about particular representations.
source : str, deprecated
Unused.
"""
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.instance().display_pub.publish(
data=data,
metadata=metadata,
)
def display(*objs, **kwargs):
"""Display a Python object in all frontends.
By default all representations will be computed and sent to the frontends.
Frontends can decide which representation is used and how.
Parameters
----------
objs : tuple of objects
The Python objects to display.
raw : bool, optional
Are the objects to be displayed already mimetype-keyed dicts of raw display data,
or Python objects that need to be formatted before display? [default: False]
include : list or tuple, optional
A list of format type strings (MIME types) to include in the
format data dict. If this is set *only* the format types included
in this list will be computed.
exclude : list or tuple, optional
A list of format type strings (MIME types) to exclude in the format
data dict. If this is set all format types will be computed,
except for those included in this argument.
metadata : dict, optional
A dictionary of metadata to associate with the output.
mime-type keys in this dictionary will be associated with the individual
representation formats, if they exist.
"""
raw = kwargs.get('raw', False)
include = kwargs.get('include')
exclude = kwargs.get('exclude')
metadata = kwargs.get('metadata')
from IPython.core.interactiveshell import InteractiveShell
if not raw:
format = InteractiveShell.instance().display_formatter.format
for obj in objs:
if raw:
publish_display_data(data=obj, metadata=metadata)
else:
format_dict, md_dict = format(obj, include=include, exclude=exclude)
if not format_dict:
# nothing to display (e.g. _ipython_display_ took over)
continue
if metadata:
# kwarg-specified metadata gets precedence
_merge(md_dict, metadata)
publish_display_data(data=format_dict, metadata=md_dict)
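# Minimal usage sketch (added; not part of the original module), illustrating
# the two calling conventions documented above; it is meant to run inside an
# active IPython/Jupyter session and the helper name is hypothetical.
def _display_usage_example():
    # raw=True: publish a mimetype-keyed dict directly
    display({'text/plain': 'hello', 'text/html': '<b>hello</b>'}, raw=True)
    # default: let the registered formatters build the representation dict
    display(3.14159)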
def display_pretty(*objs, **kwargs):
"""Display the pretty (default) representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw text data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
"""Display the HTML representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw HTML data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
"""Displays the Markdown representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw markdown data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
"""Display the SVG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw svg data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
"""Display the PNG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw png data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
"""Display the JPEG representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw JPEG data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
"""Display the LaTeX representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw latex data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
"""Display the JSON representation of an object.
Note that not many frontends support displaying JSON.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw json data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
"""Display the Javascript representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw javascript data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
"""Display the PDF representation of an object.
Parameters
----------
objs : tuple of objects
The Python objects to display, or if raw=True raw PDF data to
display.
raw : bool
Are the data objects raw data or Python objects that need to be
formatted before display? [default: False]
metadata : dict (optional)
Metadata to be associated with the specific mimetype output.
"""
_display_mimetype('application/pdf', objs, **kwargs)
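# Editor's note: a minimal usage sketch of the mimetype helpers above. It is an
# illustrative addition (not part of the original module) and assumes it runs
# inside an IPython session with a rich frontend attached.
def _example_display_mimetypes():
    # raw payloads are forwarded to the frontend unchanged when raw=True ...
    display_html("<b>bold text</b>", raw=True)
    display_markdown("**bold text**", raw=True)
    # ... otherwise the objects are formatted by IPython's display system first
    display_pretty({"answer": 42})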
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
"""An object that wraps data to be displayed."""
_read_flags = 'r'
_show_mem_addr = False
def __init__(self, data=None, url=None, filename=None):
"""Create a display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. The MIME type of the data should match the
subclasses used, so the Png subclass should be used for 'image/png'
data. If the data is a URL, the data will first be downloaded
and then displayed.
Parameters
----------
data : unicode, str or bytes
The raw data or a URL or file to load the data from
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
"""
if data is not None and isinstance(data, string_types):
if data.startswith('http') and url is None:
url = data
filename = None
data = None
elif _safe_exists(data) and filename is None:
url = None
filename = data
data = None
self.data = data
self.url = url
self.filename = None if filename is None else unicode_type(filename)
self.reload()
self._check_data()
def __repr__(self):
if not self._show_mem_addr:
cls = self.__class__
r = "<%s.%s object>" % (cls.__module__, cls.__name__)
else:
r = super(DisplayObject, self).__repr__()
return r
def _check_data(self):
"""Override in subclasses if there's something to check."""
pass
def reload(self):
"""Reload the raw data from file or URL."""
if self.filename is not None:
with open(self.filename, self._read_flags) as f:
self.data = f.read()
elif self.url is not None:
try:
try:
from urllib.request import urlopen # Py3
except ImportError:
from urllib2 import urlopen
response = urlopen(self.url)
self.data = response.read()
# extract encoding from header, if there is one:
encoding = None
for sub in response.headers['content-type'].split(';'):
sub = sub.strip()
if sub.startswith('charset'):
encoding = sub.split('=')[-1].strip()
break
# decode data, if an encoding was specified
if encoding:
self.data = self.data.decode(encoding, 'replace')
except:
self.data = None
class TextDisplayObject(DisplayObject):
"""Validate that display data is text"""
def _check_data(self):
if self.data is not None and not isinstance(self.data, string_types):
raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
def _repr_pretty_(self):
return self.data
class HTML(TextDisplayObject):
def _repr_html_(self):
return self.data
def __html__(self):
"""
This method exists to inform other HTML-using modules (e.g. Markupsafe,
htmltag, etc) that this object is HTML and does not need things like
special characters (<>&) escaped.
"""
return self._repr_html_()
class Markdown(TextDisplayObject):
def _repr_markdown_(self):
return self.data
class Math(TextDisplayObject):
def _repr_latex_(self):
s = self.data.strip('$')
return "$$%s$$" % s
class Latex(TextDisplayObject):
def _repr_latex_(self):
return self.data
class SVG(DisplayObject):
# wrap data in a property, which extracts the <svg> tag, discarding
# document headers
_data = None
@property
def data(self):
return self._data
@data.setter
def data(self, svg):
if svg is None:
self._data = None
return
# parse into dom object
from xml.dom import minidom
svg = cast_bytes_py2(svg)
x = minidom.parseString(svg)
# get svg tag (should be 1)
found_svg = x.getElementsByTagName('svg')
if found_svg:
svg = found_svg[0].toxml()
else:
# fallback on the input, trust the user
# but this is probably an error.
pass
svg = cast_unicode(svg)
self._data = svg
def _repr_svg_(self):
return self.data
class JSON(DisplayObject):
"""JSON expects a JSON-able dict or list
not an already-serialized JSON string.
Scalar types (None, number, string) are not allowed, only dict or list containers.
"""
# wrap data in a property, which warns about passing already-serialized JSON
_data = None
def _check_data(self):
if self.data is not None and not isinstance(self.data, (dict, list)):
raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
@property
def data(self):
return self._data
@data.setter
def data(self, data):
if isinstance(data, string_types):
warnings.warn("JSON expects JSONable dict or list, not JSON strings")
data = json.loads(data)
self._data = data
def _repr_json_(self):
return self.data
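# Editor's note: an illustrative sketch (not part of the original module) of the
# JSON display class above. Passing an already-serialized string triggers the
# warning in the data setter, and the string is parsed back into a container.
def _example_json_display_object():
    accepted = JSON({"answer": 42})      # dict is stored as-is
    coerced = JSON('{"answer": 42}')     # warns, then json.loads() the string
    return accepted.data, coerced.data   # both end up as the dict {"answer": 42}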
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
"""Create a Javascript display object given raw data.
When this object is returned by an expression or passed to the
display function, it will result in the data being displayed
in the frontend. If the data is a URL, the data will first be
downloaded and then displayed.
In the Notebook, the containing element will be available as `element`,
and jQuery will be available. Content appended to `element` will be
visible in the output area.
Parameters
----------
data : unicode, str or bytes
The Javascript source code or a URL to download it from.
url : unicode
A URL to download the data from.
filename : unicode
Path to a local file to load the data from.
lib : list or str
A sequence of Javascript library URLs to load asynchronously before
running the source code. The full URLs of the libraries should
be given. A single Javascript library URL can also be given as a
string.
css: : list or str
A sequence of css files to load before running the source code.
The full URLs of the css files should be given. A single css URL
can also be given as a string.
"""
if isinstance(lib, string_types):
lib = [lib]
elif lib is None:
lib = []
if isinstance(css, string_types):
css = [css]
elif css is None:
css = []
if not isinstance(lib, (list,tuple)):
raise TypeError('expected sequence, got: %r' % lib)
if not isinstance(css, (list,tuple)):
raise TypeError('expected sequence, got: %r' % css)
self.lib = lib
self.css = css
super(Javascript, self).__init__(data=data, url=url, filename=filename)
def _repr_javascript_(self):
r = ''
for c in self.css:
r += css_t % c
for l in self.lib:
r += lib_t1 % l
r += self.data
r += lib_t2*len(self.lib)
return r
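# Editor's note: an illustrative sketch (not part of the original module) showing
# how the Javascript wrapper composes the css_t/lib_t templates above around the
# source code. The URLs are hypothetical placeholders.
def _example_javascript_wrapping():
    js = Javascript("element.text('hi');",
                    lib=["https://example.org/lib.js"],
                    css=["https://example.org/style.css"])
    # The repr emits the stylesheet loader, then wraps the source in the
    # $.getScript callback: css_t % css + lib_t1 % lib + data + lib_t2.
    return js._repr_javascript_()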
# constants for identifying png/jpeg data
_PNG = b'\x89PNG\r\n\x1a\n'
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
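# Editor's note: an illustrative sketch (not part of the original module) showing
# how _pngxy() reads width/height from the fixed-layout IHDR chunk. The bytes
# below mimic only the first fields of a real PNG header.
def _example_pngxy():
    fake_header = (
        _PNG                            # PNG signature
        + struct.pack('>I', 13)         # IHDR chunk length
        + b'IHDR'                       # chunk type
        + struct.pack('>II', 640, 480)  # width, height (big-endian)
    )
    return _pngxy(fake_header)          # -> (640, 480)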
class Image(DisplayObject):
_read_flags = 'rb'
_FMT_JPEG = u'jpeg'
_FMT_PNG = u'png'
_ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
def __init__(self, data=None, url=None, filename=None, format=u'png',
embed=None, width=None, height=None, retina=False,
unconfined=False, metadata=None):
"""Create a PNG/JPEG image object given raw data.
When this object is returned by an input cell or passed to the
display function, it will result in the image being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw image data or a URL or filename to load the data from.
This always results in embedded image data.
url : unicode
A URL to download the data from. If you specify `url=`,
the image data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Images from a file are always embedded.
format : unicode
The format of the image data (png/jpeg/jpg). If a filename or URL is
given, the format will be inferred from the filename extension.
embed : bool
Should the image data be embedded using a data URI (True) or be
loaded using an <img> tag. Set this to True if you want the image
to be viewable later with no internet connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display images if `embed` is set to `False`
width : int
Width to which to constrain the image in html
height : int
Height to which to constrain the image in html
retina : bool
Automatically set the width and height to half of the measured
width and height.
This only works for embedded images because it reads the width/height
from image data.
For non-embedded images, you can just set the desired display width
and height directly.
unconfined: bool
Set unconfined=True to disable max-width confinement of the image.
metadata: dict
Specify extra metadata to attach to the image.
Examples
--------
# embedded image data, works in qtconsole and notebook
# when passed positionally, the first arg can be any of raw image data,
# a URL, or a filename from which to load image data.
# The result is always embedding image data for inline images.
Image('http://www.google.fr/images/srpr/logo3w.png')
Image('/path/to/image.jpg')
Image(b'RAW_PNG_DATA...')
# Specifying Image(url=...) does not embed the image data,
# it only generates an `<img>` tag with a link to the source.
# This will not work in the qtconsole or offline.
Image(url='http://www.google.fr/images/srpr/logo3w.png')
"""
if filename is not None:
ext = self._find_ext(filename)
elif url is not None:
ext = self._find_ext(url)
elif data is None:
raise ValueError("No image data found. Expecting filename, url, or data.")
elif isinstance(data, string_types) and (
data.startswith('http') or _safe_exists(data)
):
ext = self._find_ext(data)
else:
ext = None
if ext is not None:
format = ext.lower()
if ext == u'jpg' or ext == u'jpeg':
format = self._FMT_JPEG
if ext == u'png':
format = self._FMT_PNG
elif isinstance(data, bytes) and format == 'png':
# infer image type from image data header,
# only if format might not have been specified.
if data[:2] == _JPEG:
format = 'jpeg'
self.format = unicode_type(format).lower()
self.embed = embed if embed is not None else (url is None)
if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
raise ValueError("Cannot embed the '%s' image format" % (self.format))
self.width = width
self.height = height
self.retina = retina
self.unconfined = unconfined
self.metadata = metadata
super(Image, self).__init__(data=data, url=url, filename=filename)
if retina:
self._retina_shape()
def _retina_shape(self):
"""load pixel-doubled width and height from image data"""
if not self.embed:
return
if self.format == 'png':
w, h = _pngxy(self.data)
elif self.format == 'jpeg':
w, h = _jpegxy(self.data)
else:
# retina only supports png
return
self.width = w // 2
self.height = h // 2
def reload(self):
"""Reload the raw data from file or URL."""
if self.embed:
super(Image,self).reload()
if self.retina:
self._retina_shape()
def _repr_html_(self):
if not self.embed:
width = height = klass = ''
if self.width:
width = ' width="%d"' % self.width
if self.height:
height = ' height="%d"' % self.height
if self.unconfined:
klass = ' class="unconfined"'
return u'<img src="{url}"{width}{height}{klass}/>'.format(
url=self.url,
width=width,
height=height,
klass=klass,
)
def _data_and_metadata(self):
"""shortcut for returning metadata with shape information, if defined"""
md = {}
if self.width:
md['width'] = self.width
if self.height:
md['height'] = self.height
if self.unconfined:
md['unconfined'] = self.unconfined
if self.metadata:
md.update(self.metadata)
if md:
return self.data, md
else:
return self.data
def _repr_png_(self):
if self.embed and self.format == u'png':
return self._data_and_metadata()
def _repr_jpeg_(self):
if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
return self._data_and_metadata()
def _find_ext(self, s):
return unicode_type(s.split('.')[-1].lower())
class Video(DisplayObject):
def __init__(self, data=None, url=None, filename=None, embed=None, mimetype=None):
"""Create a video object given raw data or an URL.
When this object is returned by an input cell or passed to the
display function, it will result in the video being displayed
in the frontend.
Parameters
----------
data : unicode, str or bytes
The raw video data or a URL or filename to load the data from.
This always results in embedded video data.
url : unicode
A URL to download the data from. If you specify `url=`,
the video data will not be embedded unless you also specify `embed=True`.
filename : unicode
Path to a local file to load the data from.
Videos from a file are always embedded.
embed : bool
Should the video data be embedded using a data URI (True) or be
loaded from its original source via a <video> tag (False). Set this to
True if you want the video to be viewable later with no internet
connection in the notebook.
Default is `True`, unless the keyword argument `url` is set, then
default value is `False`.
Note that QtConsole is not able to display videos if `embed` is set to `False`.
mimetype: unicode
Specify the mimetype when you load an encoded video.
Examples
--------
Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
Video('path/to/video.mp4')
Video('path/to/video.mp4', embed=False)
"""
if url is None and isinstance(data, string_types) and data.startswith('http'):
url = data
data = None
embed = False
elif data is not None and os.path.exists(data):
filename = data
data = None
self.mimetype = mimetype
self.embed = embed if embed is not None else (filename is not None)
super(Video, self).__init__(data=data, url=url, filename=filename)
def _repr_html_(self):
# External URLs and potentially local files are not embedded into the
# notebook output.
if not self.embed:
url = self.url if self.url is not None else self.filename
output = """<video src="{0}" controls>
Your browser does not support the <code>video</code> element.
</video>""".format(url)
return output
# Embedded videos uses base64 encoded videos.
if self.filename is not None:
mimetypes.init()
mimetype, encoding = mimetypes.guess_type(self.filename)
from base64 import b64encode
video = open(self.filename, 'rb').read()
# inline the raw bytes as base64 so they can be placed in a data URI
video_encoded = b64encode(video).decode('ascii')
else:
video_encoded = self.data
mimetype = self.mimetype
output = """<video controls>
<source src="data:{0};base64,{1}" type="{0}">
Your browser does not support the video tag.
</video>""".format(mimetype, video_encoded)
return output
def reload(self):
# TODO
pass
def _repr_png_(self):
# TODO
pass
def _repr_jpeg_(self):
# TODO
pass
def clear_output(wait=False):
"""Clear the output of the current cell receiving output.
Parameters
----------
wait : bool [default: false]
Wait to clear the output until new output is available to replace it."""
from IPython.core.interactiveshell import InteractiveShell
if InteractiveShell.initialized():
InteractiveShell.instance().display_pub.clear_output(wait)
else:
from IPython.utils import io
print('\033[2K\r', file=io.stdout, end='')
io.stdout.flush()
print('\033[2K\r', file=io.stderr, end='')
io.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
"""Select figure formats for the inline backend. Optionally pass quality for JPEG.
For example, this enables PNG and JPEG output with a JPEG quality of 90%::
In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
To set this in your config files use the following::
c.InlineBackend.figure_formats = {'png', 'jpeg'}
c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
Parameters
----------
*formats : strs
One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs :
Keyword args will be relayed to ``figure.canvas.print_figure``.
"""
from IPython.core.interactiveshell import InteractiveShell
from IPython.core.pylabtools import select_figure_formats
from IPython.kernel.zmq.pylab.config import InlineBackend
# build kwargs, starting with InlineBackend config
kw = {}
cfg = InlineBackend.instance()
kw.update(cfg.print_figure_kwargs)
kw.update(**kwargs)
shell = InteractiveShell.instance()
select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
"""Set whether the inline backend closes all figures automatically or not.
By default, the inline backend used in the IPython Notebook will close all
matplotlib figures automatically after each cell is run. This means that
plots in different cells won't interfere. Sometimes, you may want to make
a plot in one cell and then refine it in later cells. This can be accomplished
by::
In [1]: set_matplotlib_close(False)
To set this in your config files use the following::
c.InlineBackend.close_figures = False
Parameters
----------
close : bool
Should all matplotlib figures be automatically closed after each cell is
run?
"""
from IPython.kernel.zmq.pylab.config import InlineBackend
cfg = InlineBackend.instance()
cfg.close_figures = close
|
mit
|
karlnapf/shogun
|
examples/undocumented/python/graphical/interactive_gp_demo.py
|
2
|
13912
|
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Heiko Strathmann, Cameron Lai
"""
Shogun Gaussian processes demo based on interactive SVM demo by Christian \
Widmer and Soeren Sonnenburg which itself is based on PyQT Demo by Eli Bendersky
Work to be done on parameter (e.g. kernel width) optimization.
"""
import sys, os, csv
import scipy as SP
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from shogun import *
import shogun as sg
import util
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.create_toy_data()
self.on_show()
def on_show(self):
self.axes.clear()
self.axes.plot(self.x, self.y, 'ro')
self.axes.set_xlim((self.xmin,self.xmax))
self.axes.set_ylim((self.ymin,self.ymax))
self.axes.grid(True)
self.canvas.draw()
self.fill_series_list(self.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
x=SP.append(self.x, event.xdata)
self.y=SP.append(self.y, event.ydata)
self.x= x[:,SP.newaxis]
self.on_show()
self.status_text.setText("New data point: x=%f, y=%f"%(event.xdata, event.ydata))
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
#load_action = self.create_action("&Load file",
# shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
#self.add_actions(self.file_menu,
# (load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def clear_data(self):
self.x=SP.array([])
self.y=SP.array([])
self.xmin=-5
self.xmax=5
self.ymin=-5
self.ymax=5
self.on_show()
self.status_text.setText("Data cleared")
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "Linear":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "Polynomial":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "Gaussian":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def get_stats(self):
num_train = len(self.x)
str_train = "num training points: %i" % num_train
str_test = "num training points: %s" % self.nTest.text()
return (str_train, str_test)
def create_toy_data(self):
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
x = SP.arange(self.xmin,self.xmax,(self.xmax-self.xmin)/100.0)
C = 2 #offset
b = 0
y = b*x + C + float(self.sine_amplitude.text())*SP.sin(float(self.sine_freq.text())*x)
# dy = b + 1*SP.cos(x)
y += float(self.noise_level.text())*random.randn(y.shape[0])
self.y=y-y.mean()
self.x= x[:,SP.newaxis]
self.on_show()
def learn_kernel_width(self):
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad = machine_evaluation(
machine=gp, features=feat_train, labels=labels,
evaluation_criterion=crit,
differentiable_function=inf)
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(root, grad);
best_combination=grad_search.select_model(1);
self.sigma.setText("1.0")
self.plot_gp()
def plot_gp(self):
feat_train = RealFeatures(self.x.T)
labels = RegressionLabels(self.y)
#[x,y]=self.data.get_data()
#feat_train=RealFeatures(x.T)
#labels=RegressionLabels(y)
n_dimensions = 1
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
#new interface with likelihood parameters being decoupled from the covariance function
likelihood = GaussianLikelihood()
#covar_parms = SP.log([2])
#hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
# construct covariance function
width=float(self.sigma.text())
degree=int(self.degree.text())
if kernel_name == "Linear":
gk = LinearKernel(feat_train, feat_train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "Polynomial":
gk = sg.kernel("PolyKernel", degree=degree)
gk.init(feat_train, feat_train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "Gaussian":
gk = GaussianKernel(feat_train, feat_train, width)
#SECF = GaussianKernel(feat_train, feat_train, width)
#covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(gk, feat_train, zmean, labels, likelihood);
inf.get_negative_marginal_likelihood()
# locations of equispaced test predictions
x_test = array([linspace(self.xmin,self.xmax, int(self.nTest.text()))])
feat_test=RealFeatures(x_test)
gp = GaussianProcessRegression(inf)
gp.train()
covariance = gp.get_variance_vector(feat_test)
predictions = gp.get_mean_vector(feat_test)
#print "x_test"
#print feat_test.get_feature_matrix()
#print "mean predictions"
#print predictions.get_labels()
#print "covariances"
#print covariance.get_labels()
self.status_text.setText("Negative Log Marginal Likelihood = %f"%(inf.get_negative_marginal_likelihood()))
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlim((self.xmin,self.xmax))
self.axes.set_ylim((self.ymin,self.ymax))
self.axes.hold(True)
x_test=feat_test.get_feature_matrix()[0]
self.axes.plot(x_test, predictions, 'b-x')
#self.axes.plot(x_test, labels.get_labels(), 'ro')
self.axes.plot(self.x, self.y, 'ro')
#self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()-3*sqrt(covariance.get_labels()))
#self.axes.plot(feat_test.get_feature_matrix()[0], predictions.get_labels()+3*sqrt(covariance.get_labels()))
upper = predictions+3*sqrt(covariance)
lower = predictions-3*sqrt(covariance)
self.axes.fill_between(x_test, lower, upper, color='grey')
self.axes.hold(False)
self.canvas.draw()
self.fill_series_list(self.get_stats())
def create_main_frame(self):
self.xmin=-5
self.xmax=5
self.ymin=-5
self.ymax=5
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "Gaussian")
self.kernel_combo.insertItem(-1, "Polynomial")
self.kernel_combo.insertItem(-1, "Linear")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
log_label = QLabel("Data points")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
self.sine_freq = QLineEdit()
self.sine_freq.setText("1.0")
self.sine_amplitude = QLineEdit()
self.sine_amplitude.setText("1.0")
self.sigma = QLineEdit()
self.sigma.setText("1.2")
self.degree = QLineEdit()
self.degree.setText("2")
self.noise_level = QLineEdit()
self.noise_level.setText("1")
self.nTest = QLineEdit()
self.nTest.setText("100")
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(QLabel('Sine data setting: '))
spins_hbox.addWidget(QLabel('Sine Freq.'))
spins_hbox.addWidget(self.sine_freq)
spins_hbox.addWidget(QLabel('Sine Amplitude'))
spins_hbox.addWidget(self.sine_amplitude)
spins_hbox.addWidget(QLabel('Noise Level'))
spins_hbox.addWidget(self.noise_level)
spins_hbox.addStretch(1)
spins_hbox2 = QHBoxLayout()
spins_hbox2.addWidget(QLabel('Kernel Setting: '))
spins_hbox2.addWidget(QLabel('Type'))
spins_hbox2.addWidget(self.kernel_combo)
spins_hbox2.addWidget(QLabel("Width"))
spins_hbox2.addWidget(self.sigma)
spins_hbox2.addWidget(QLabel("Degree"))
spins_hbox2.addWidget(self.degree)
spins_hbox2.addStretch(1)
spins_hbox3 = QHBoxLayout()
spins_hbox3.addWidget(QLabel('Test Setting: '))
spins_hbox3.addWidget(QLabel('Number of test points'))
spins_hbox3.addWidget(self.nTest)
spins_hbox3.addStretch(1)
self.show_button = QPushButton("&Train GP")
self.connect(self.show_button, SIGNAL('clicked()'), self.plot_gp)
self.gen_sine_data_button = QPushButton("&Generate Sine Data")
self.connect(self.gen_sine_data_button, SIGNAL('clicked()'), self.create_toy_data)
self.clear_data_button = QPushButton("&Clear")
self.connect(self.clear_data_button, SIGNAL('clicked()'), self.clear_data)
self.learn_kernel_button = QPushButton("&Learn Kernel Width and train GP")
self.connect(self.learn_kernel_button, SIGNAL('clicked()'), self.learn_kernel_width)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(QLabel("Data Points"))
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_vbox.addWidget(QLabel("Settings"))
right2_vbox.addWidget(self.gen_sine_data_button)
right2_vbox.addWidget(self.clear_data_button)
right2_vbox.addWidget(self.show_button)
#right2_vbox.addWidget(self.learn_kernel_button)
right2_vbox.addLayout(spins_hbox)
right2_vbox.addLayout(spins_hbox2)
right2_vbox.addLayout(spins_hbox3)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
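# Editor's note: a minimal, GUI-free sketch of the GP regression pipeline that
# plot_gp() above wires into the Qt widgets. Illustrative only; it reuses the
# same Shogun calls as the demo and assumes the shogun Python bindings are
# importable as above.
def _example_gp_regression(x, y, width=1.2, n_test=100):
    feat_train = RealFeatures(x.reshape(1, -1))       # features are stored as columns
    labels = RegressionLabels(y)
    kernel = GaussianKernel(feat_train, feat_train, width)
    inf = ExactInferenceMethod(kernel, feat_train, ZeroMean(), labels,
                               GaussianLikelihood())
    gp = GaussianProcessRegression(inf)
    gp.train()
    x_test = array([linspace(x.min(), x.max(), n_test)])
    feat_test = RealFeatures(x_test)
    # posterior mean and variance at the test locations
    return gp.get_mean_vector(feat_test), gp.get_variance_vector(feat_test)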
|
bsd-3-clause
|
simpla-fusion/SimPla
|
scripts/read_geqdsk.py
|
1
|
5367
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def read_geqdsk(filename):
"""
'desc' :desc,
'nw' :nw, # Number of horizontal R grid points
'nh' :nh, # Number of vertical Z grid points
'rdim' :rdim, # Horizontal dimension in meter of computational box
'zdim' :zdim, # Vertical dimension in meter of computational box
'rcentr' :rcentr,#
'rleft' :rleft, # Minimum R in meter of rectangular computational box
'zmid' :zmid, # Z of center of computational box in meter
'rmaxis' :rmaxis,# R of magnetic axis in meter
'zmaxis' :zmaxis,# Z of magnetic axis in meter
'simag' :simag, # Poloidal flux at magnetic axis in Weber / rad
'sibry' :sibry, # Poloidal flux at the plasma boundary in Weber / rad
'rcentr' :rcentr,# R in meter of vacuum toroidal magnetic field BCENTR
'bcentr' :bcentr,# Vacuum toroidal magnetic field in Tesla at RCENTR
'current' :current,# Plasma current in Ampere
'fpol' :fpol, # Poloidal current function in m-T, $F=RB_T$ on flux grid
'pres' :pres, # Plasma pressure in $nt/m^2$ on uniform flux grid
'ffprim' :ffprim,# $FF^\prime(\psi)$ in $(mT)^2/(Weber/rad)$ on uniform flux grid
'pprim' :pprim, # $P^\prime(\psi)$ in $(nt/m^2)/(Weber/rad)$ on uniform flux grid
'psizr' :psizr, # Poloidal flux in Weber/rad on the rectangular grid points
'qpsi' :qpsi, # q values on uniform flux grid from axis to boundary
'nbbbs' :nbbbs, # Number of boundary points
'limitr' :limitr,# Number of limiter points
'rbbbs' :rbbbs, # R of boundary points in meter
'zbbbs' :zbbbs, # Z of boundary points in meter
'rlim' :rlim, # R of surrounding limiter contour in meter
'zlim' :zlim, # Z of surrounding limiter contour in meter
Torus Current Density
$J_T(Amp/m^2)= R P^\prime(\psi)+ F F^\prime(\psi)/R/\mu_0$
"""
d=open(filename,"r").read().replace("\n","");
desc = d[0:48]
# idum = int(d[48:52])
nw = int(d[52:56])
nh = int(d[56:60])
it=60
(rdim,zdim,rcentr,rleft,zmid,
rmaxis,zmaxis,simag,sibry,bcentr,
current,simag,xdum,rmaxis,xdum,
zmaxis,xdum,sibry,xdum,xdum)=(float(d[it+i*16:it+(i+1)*16]) for i in range(20))
it+=20*16;
fpol=np.array([float(d[it+i*16:it+(i+1)*16]) for i in range(nw)]);
it+=nw*16;
pres=np.array([float(d[it+i*16:it+(i+1)*16]) for i in range(nw)]);
it+=nw*16;
ffprim=np.array([float(d[it+i*16:it+(i+1)*16]) for i in range(nw)]);
it+=nw*16;
pprim=np.array([float(d[it+i*16:it+(i+1)*16]) for i in range(nw)]);
it+=nw*16;
psirz=np.reshape(np.array([float(d[it+i*16:it+(i+1)*16]) for i in range(nw*nh)]),(nw,nh));
it+=nh*nw*16;
qpsi=np.array([float(d[it+i*16:it+(i+1)*16]) for i in range(nw)]);
it+=nw*16;
nbbbs=int(d[it:it+5])
limitr=int(d[it+5:it+10])
it+=10
rbbbs=np.array([float(d[it+i*32:it+i*32+16]) for i in range(nbbbs)]);
zbbbs=np.array([float(d[it+i*32+16:it+i*32+32]) for i in range(nbbbs)]);
it+=nbbbs*16*2;
rlim=np.array([float(d[it+i*32:it+i*32+16]) for i in range(limitr)]);
zlim=np.array([float(d[it+i*32+16:it+i*32+32]) for i in range(limitr)]);
print(current)
print(sibry)
print(nbbbs)
plt.contour(psirz.reshape([nh,nw]),[(sibry-simag)/10.0*i+simag for i in range(10)])
plt.plot((rlim -rleft)/rdim*nw,zlim /zdim*nh+nh/2)
#plt.plot((rbbbs-rleft)/rdim*nw,zbbbs/zdim*nh+nh/2)
plt.show()
return {
'desc' :desc,
'nw' :nw, # Number of horizontal R grid points
'nh' :nh, # Number of vertical Z grid points
'rdim' :rdim, # Horizontal dimension in meter of computational box
'zdim' :zdim, # Vertical dimension in meter of computational box
'rcentr' :rcentr,#
'rleft' :rleft, # Minimum R in meter of rectangular computational box
'zmid' :zmid, # Z of center of computational box in meter
'rmaxis' :rmaxis,# R of magnetic axis in meter
'zmaxis' :zmaxis,# Z of magnetic axis in meter
'simag' :simag, # Poloidal flux at magnetic axis in Weber / rad
'sibry' :sibry, # Poloidal flux at the plasma boundary in Weber / rad
'rcentr' :rcentr,# R in meter of vacuum toroidal magnetic field BCENTR
'bcentr' :bcentr,# Vacuum toroidal magnetic field in Tesla at RCENTR
'current' :current,# Plasma current in Ampere
'fpol' :fpol, # Poloidal current function in m-T, $F=RB_T$ on flux grid
'pres' :pres, # Plasma pressure in $nt/m^2$ on uniform flux grid
'ffprim' :ffprim,# $FF^\prime(\psi)$ in $(mT)^2/(Weber/rad)$ on uniform flux grid
'pprim' :pprim, # $P^\prime(\psi)$ in $(nt/m^2)/(Weber/rad)$ on uniform flux grid
'psizr' :psirz, # Poloidal flux in Weber/rad on the rectangular grid points
'qpsi' :qpsi, # q values on uniform flux grid from axis to boundary
'nbbbs' :nbbbs, # Number of boundary points
'limitr' :limitr,# Number of limiter points
'rbbbs' :rbbbs, # R of boundary points in meter
'zbbbs' :zbbbs, # Z of boundary points in meter
'rlim' :rlim, # R of surrounding limiter contour in meter
'zlim' :zlim, # Z of surrounding limiter contour in meter
}
read_geqdsk('g033068.02750')
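# Editor's note: an illustrative sketch (not part of the original script) of the
# fixed-width parsing used above: GEQDSK records are consecutive 16-character
# float fields, which read_geqdsk() slices out of the newline-stripped string.
def _example_fixed_width_fields():
    record = " 1.000000000E+00-2.500000000E-01 3.140000000E+00"  # three 16-char fields
    return [float(record[i * 16:(i + 1) * 16]) for i in range(3)]  # [1.0, -0.25, 3.14]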
|
bsd-3-clause
|
kazemakase/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py
|
254
|
2005
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
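# Editor's note: one possible completion of the TASK placeholders above, added as
# an illustrative sketch; it is not the canonical solution shipped with the
# tutorial, and any character n-gram vectorizer / linear classifier combination
# would work equally well.
vectorizer = TfidfVectorizer(analyzer='char', ngram_range=(1, 3))
clf = Pipeline([('vec', vectorizer), ('clf', Perceptron())])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)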
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
bzero/statsmodels
|
statsmodels/examples/ex_kernel_test_functional.py
|
34
|
2246
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 08 19:03:20 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
from statsmodels.regression.linear_model import OLS
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
seed = np.random.randint(999999)
#seed = 661176
print(seed)
np.random.seed(seed)
sig_e = 0.5 #0.1
nobs, k_vars = 200, 1
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
x.sort()
order = 3
exog = x**np.arange(order + 1)
beta = np.array([1, 1, 0.1, 0.0])[:order+1] # 1. / np.arange(1, order + 2)
y_true = np.dot(exog, beta)
y = y_true + sig_e * np.random.normal(size=nobs)
endog = y
print('DGP')
print('nobs=%d, beta=%r, sig_e=%3.1f' % (nobs, beta, sig_e))
mod_ols = OLS(endog, exog[:,:2])
res_ols = mod_ols.fit()
#'cv_ls'[1000, 0.5][0.01, 0.45]
tst = smke.TestFForm(endog, exog[:,:2], bw=[0.01, 0.45], var_type='cc',
fform=lambda x,p: mod_ols.predict(p,x),
estimator=lambda y,x: OLS(y,x).fit().params,
nboot=1000)
print('bw', tst.bw)
print('tst.test_stat', tst.test_stat)
print(tst.sig)
print('tst.boots_results mean, min, max', (tst.boots_results.mean(),
tst.boots_results.min(),
tst.boots_results.max()))
print('lower tail bootstrap p-value', (tst.boots_results < tst.test_stat).mean())
print('upper tail bootstrap p-value', (tst.boots_results >= tst.test_stat).mean())
from scipy import stats
print('asymp. normal p-value (2-sided)', stats.norm.sf(np.abs(tst.test_stat))*2)
print('asymp. normal p-value (upper)', stats.norm.sf(tst.test_stat))
do_plot=True
if do_plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.')
plt.plot(x, res_ols.fittedvalues)
plt.title('OLS fit')
plt.figure()
plt.hist(tst.boots_results.ravel(), bins=20)
plt.title('bootstrap histogram of test statistic')
plt.show()
|
bsd-3-clause
|
ryfeus/lambda-packs
|
Pandas_numpy/source/pandas/io/formats/style.py
|
2
|
41460
|
"""
Module for applying conditional formatting to
DataFrames and Series.
"""
from functools import partial
from itertools import product
from contextlib import contextmanager
from uuid import uuid1
import copy
from collections import defaultdict, MutableMapping
try:
from jinja2 import (
PackageLoader, Environment, ChoiceLoader, FileSystemLoader
)
except ImportError:
msg = "pandas.Styler requires jinja2. "\
"Please install with `conda install Jinja2`\n"\
"or `pip install Jinja2`"
raise ImportError(msg)
from pandas.core.dtypes.common import is_float, is_string_like
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pandas.compat import range
from pandas.core.config import get_option
from pandas.core.generic import _shared_docs
from pandas.core.common import _any_not_none, sentinel_factory
from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice
from pandas.util._decorators import Appender
try:
import matplotlib.pyplot as plt
from matplotlib import colors
has_mpl = True
except ImportError:
has_mpl = False
no_mpl_message = "{0} requires matplotlib."
@contextmanager
def _mpl(func):
if has_mpl:
yield plt, colors
else:
raise ImportError(no_mpl_message.format(func.__name__))
class Styler(object):
"""
Helps style a DataFrame or Series according to the
data with HTML and CSS.
.. versionadded:: 0.17.1
.. warning::
This is a new feature and is under active development.
We'll be adding features and possibly making breaking changes in future
releases.
Parameters
----------
data: Series or DataFrame
precision: int
precision to round floats to, defaults to pd.options.display.precision
table_styles: list-like, default None
list of {selector: (attr, value)} dicts; see Notes
uuid: str, default None
a unique identifier to avoid CSS collisions; generated automatically
caption: str, default None
caption to attach to the table
Attributes
----------
env : Jinja2 Environment
template : Jinja2 Template
loader : Jinja2 Loader
Notes
-----
Most styling will be done by passing style functions into
``Styler.apply`` or ``Styler.applymap``. Style functions should
return values with strings containing CSS ``'attr: value'`` that will
be applied to the indicated cells.
If using in the Jupyter notebook, Styler has defined a ``_repr_html_``
to automatically render itself. Otherwise call Styler.render to get
the generated HTML.
CSS classes are attached to the generated HTML
* Index and Column names include ``index_name`` and ``level<k>``
where `k` is its level in a MultiIndex
* Index label cells include
* ``row_heading``
* ``row<n>`` where `n` is the numeric position of the row
* ``level<k>`` where `k` is the level in a MultiIndex
* Column label cells include
* ``col_heading``
* ``col<n>`` where `n` is the numeric position of the column
* ``level<k>`` where `k` is the level in a MultiIndex
* Blank cells include ``blank``
* Data cells include ``data``
See Also
--------
pandas.DataFrame.style
"""
loader = PackageLoader("pandas", "io/formats/templates")
env = Environment(
loader=loader,
trim_blocks=True,
)
template = env.get_template("html.tpl")
def __init__(self, data, precision=None, table_styles=None, uuid=None,
caption=None, table_attributes=None):
self.ctx = defaultdict(list)
self._todo = []
if not isinstance(data, (pd.Series, pd.DataFrame)):
raise TypeError("``data`` must be a Series or DataFrame")
if data.ndim == 1:
data = data.to_frame()
if not data.index.is_unique or not data.columns.is_unique:
raise ValueError("style is not supported for non-unique indicies.")
self.data = data
self.index = data.index
self.columns = data.columns
self.uuid = uuid
self.table_styles = table_styles
self.caption = caption
if precision is None:
precision = get_option('display.precision')
self.precision = precision
self.table_attributes = table_attributes
# display_funcs maps (row, col) -> formatting function
def default_display_func(x):
if is_float(x):
return '{:>.{precision}g}'.format(x, precision=self.precision)
else:
return x
self._display_funcs = defaultdict(lambda: default_display_func)
def _repr_html_(self):
"""Hooks into Jupyter notebook rich display system."""
return self.render()
@Appender(_shared_docs['to_excel'] % dict(
axes='index, columns', klass='Styler',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='\n .. versionadded:: 0.20'))
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def _translate(self):
"""
Convert the DataFrame in `self.data` and the attrs from `_build_styles`
into a dictionary of {head, body, uuid, cellstyle}
"""
table_styles = self.table_styles or []
caption = self.caption
ctx = self.ctx
precision = self.precision
uuid = self.uuid or str(uuid1()).replace("-", "_")
ROW_HEADING_CLASS = "row_heading"
COL_HEADING_CLASS = "col_heading"
INDEX_NAME_CLASS = "index_name"
DATA_CLASS = "data"
BLANK_CLASS = "blank"
BLANK_VALUE = ""
def format_attr(pair):
return "{key}={value}".format(**pair)
# for sparsifying a MultiIndex
idx_lengths = _get_level_lengths(self.index)
col_lengths = _get_level_lengths(self.columns)
cell_context = dict()
n_rlvls = self.data.index.nlevels
n_clvls = self.data.columns.nlevels
rlabels = self.data.index.tolist()
clabels = self.data.columns.tolist()
if n_rlvls == 1:
rlabels = [[x] for x in rlabels]
if n_clvls == 1:
clabels = [[x] for x in clabels]
clabels = list(zip(*clabels))
cellstyle = []
head = []
for r in range(n_clvls):
# Blank for Index columns...
row_es = [{"type": "th",
"value": BLANK_VALUE,
"display_value": BLANK_VALUE,
"is_visible": True,
"class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)
# ... except maybe the last for columns.names
name = self.data.columns.names[r]
cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
"level{lvl}".format(lvl=r)]
name = BLANK_VALUE if name is None else name
row_es.append({"type": "th",
"value": name,
"display_value": name,
"class": " ".join(cs),
"is_visible": True})
if clabels:
for c, value in enumerate(clabels[r]):
cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get(
"col_headings", {}).get(r, {}).get(c, []))
es = {
"type": "th",
"value": value,
"display_value": value,
"class": " ".join(cs),
"is_visible": _is_visible(c, r, col_lengths),
}
colspan = col_lengths.get((r, c), 0)
if colspan > 1:
es["attributes"] = [
format_attr({"key": "colspan", "value": colspan})
]
row_es.append(es)
head.append(row_es)
if self.data.index.names and _any_not_none(*self.data.index.names):
index_header_row = []
for c, name in enumerate(self.data.index.names):
cs = [INDEX_NAME_CLASS,
"level{lvl}".format(lvl=c)]
name = '' if name is None else name
index_header_row.append({"type": "th", "value": name,
"class": " ".join(cs)})
index_header_row.extend(
[{"type": "th",
"value": BLANK_VALUE,
"class": " ".join([BLANK_CLASS])
}] * len(clabels[0]))
head.append(index_header_row)
body = []
for r, idx in enumerate(self.data.index):
row_es = []
for c, value in enumerate(rlabels[r]):
rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
"row{row}".format(row=r)]
es = {
"type": "th",
"is_visible": _is_visible(r, c, idx_lengths),
"value": value,
"display_value": value,
"id": "_".join(rid[1:]),
"class": " ".join(rid)
}
rowspan = idx_lengths.get((c, r), 0)
if rowspan > 1:
es["attributes"] = [
format_attr({"key": "rowspan", "value": rowspan})
]
row_es.append(es)
for c, col in enumerate(self.data.columns):
cs = [DATA_CLASS, "row{row}".format(row=r),
"col{col}".format(col=c)]
cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
formatter = self._display_funcs[(r, c)]
value = self.data.iloc[r, c]
row_es.append({
"type": "td",
"value": value,
"class": " ".join(cs),
"id": "_".join(cs[1:]),
"display_value": formatter(value)
})
props = []
for x in ctx[r, c]:
# have to handle empty styles like ['']
if x.count(":"):
props.append(x.split(":"))
else:
props.append(['', ''])
cellstyle.append({'props': props,
'selector': "row{row}_col{col}"
.format(row=r, col=c)})
body.append(row_es)
return dict(head=head, cellstyle=cellstyle, body=body, uuid=uuid,
precision=precision, table_styles=table_styles,
caption=caption, table_attributes=self.table_attributes)
def format(self, formatter, subset=None):
"""
Format the text display value of cells.
.. versionadded:: 0.18.0
Parameters
----------
formatter: str, callable, or dict
subset: IndexSlice
An argument to ``DataFrame.loc`` that restricts which elements
``formatter`` is applied to.
Returns
-------
self : Styler
Notes
-----
``formatter`` is either an ``a`` or a dict ``{column name: a}`` where
``a`` is one of
- str: this will be wrapped in: ``a.format(x)``
- callable: called with the value of an individual cell
The default display value for numeric values is the "general" (``g``)
format with ``pd.options.display.precision`` precision.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
>>> df.style.format("{:.2%}")
>>> df['c'] = ['a', 'b', 'c', 'd']
>>> df.style.format({'C': str.upper})
"""
if subset is None:
row_locs = range(len(self.data))
col_locs = range(len(self.data.columns))
else:
subset = _non_reducing_slice(subset)
if len(subset) == 1:
subset = subset, self.data.columns
sub_df = self.data.loc[subset]
row_locs = self.data.index.get_indexer_for(sub_df.index)
col_locs = self.data.columns.get_indexer_for(sub_df.columns)
if isinstance(formatter, MutableMapping):
for col, col_formatter in formatter.items():
# formatter must be callable, so '{}' are converted to lambdas
col_formatter = _maybe_wrap_formatter(col_formatter)
col_num = self.data.columns.get_indexer_for([col])[0]
for row_num in row_locs:
self._display_funcs[(row_num, col_num)] = col_formatter
else:
# single scalar to format all cells with
locs = product(*(row_locs, col_locs))
for i, j in locs:
formatter = _maybe_wrap_formatter(formatter)
self._display_funcs[(i, j)] = formatter
return self
def render(self, **kwargs):
r"""
Render the built up styles to HTML
.. versionadded:: 0.17.1
Parameters
----------
**kwargs:
Any additional keyword arguments are passed through
to ``self.template.render``. This is useful when you
need to provide additional variables for a custom
template.
.. versionadded:: 0.20
Returns
-------
rendered: str
the rendered HTML
Notes
-----
``Styler`` objects have defined the ``_repr_html_`` method
which automatically calls ``self.render()`` when it's the
last item in a Notebook cell. When calling ``Styler.render()``
directly, wrap the result in ``IPython.display.HTML`` to view
the rendered HTML in the notebook.
Pandas uses the following keys in render. Arguments passed
in ``**kwargs`` take precedence, so think carefully if you want
to override them:
* head
* cellstyle
* body
* uuid
* precision
* table_styles
* caption
* table_attributes
"""
self._compute()
# TODO: namespace all the pandas keys
d = self._translate()
# filter out empty styles, every cell will have a class
# but the list of props may just be [['', '']].
# so we have the nested anys below
trimmed = [x for x in d['cellstyle']
if any(any(y) for y in x['props'])]
d['cellstyle'] = trimmed
d.update(kwargs)
return self.template.render(**d)
def _update_ctx(self, attrs):
"""
update the state of the Styler. Collects a mapping
of {index_label: ['<property>: <value>']}
attrs: Series or DataFrame
should contain strings of '<property>: <value>;<prop2>: <val2>'
Whitespace shouldn't matter and the final trailing ';' shouldn't
matter.
"""
for row_label, v in attrs.iterrows():
for col_label, col in v.iteritems():
i = self.index.get_indexer([row_label])[0]
j = self.columns.get_indexer([col_label])[0]
for pair in col.rstrip(";").split(";"):
self.ctx[(i, j)].append(pair)
def _copy(self, deepcopy=False):
styler = Styler(self.data, precision=self.precision,
caption=self.caption, uuid=self.uuid,
table_styles=self.table_styles)
if deepcopy:
styler.ctx = copy.deepcopy(self.ctx)
styler._todo = copy.deepcopy(self._todo)
else:
styler.ctx = self.ctx
styler._todo = self._todo
return styler
def __copy__(self):
"""
Shallow copy by default.
"""
return self._copy(deepcopy=False)
def __deepcopy__(self, memo):
return self._copy(deepcopy=True)
def clear(self):
""""Reset" the styler, removing any previously applied styles.
Returns None.
"""
self.ctx.clear()
self._todo = []
def _compute(self):
"""
Execute the style functions built up in `self._todo`.
Relies on the convention that all style functions go through
.apply or .applymap. They append styles to apply as tuples of
(application method, *args, **kwargs)
"""
r = self
for func, args, kwargs in self._todo:
r = func(self)(*args, **kwargs)
return r
def _apply(self, func, axis=0, subset=None, **kwargs):
subset = slice(None) if subset is None else subset
subset = _non_reducing_slice(subset)
data = self.data.loc[subset]
if axis is not None:
result = data.apply(func, axis=axis, **kwargs)
else:
result = func(data, **kwargs)
if not isinstance(result, pd.DataFrame):
raise TypeError(
"Function {func!r} must return a DataFrame when "
"passed to `Styler.apply` with axis=None"
.format(func=func))
if not (result.index.equals(data.index) and
result.columns.equals(data.columns)):
msg = ('Result of {func!r} must have identical index and '
'columns as the input'.format(func=func))
raise ValueError(msg)
result_shape = result.shape
expected_shape = self.data.loc[subset].shape
if result_shape != expected_shape:
msg = ("Function {func!r} returned the wrong shape.\n"
"Result has shape: {res}\n"
"Expected shape: {expect}".format(func=func,
res=result.shape,
expect=expected_shape))
raise ValueError(msg)
self._update_ctx(result)
return self
def apply(self, func, axis=0, subset=None, **kwargs):
"""
Apply a function column-wise, row-wise, or table-wise,
updating the HTML representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func : function
``func`` should take a Series or DataFrame (depending
on ``axis``), and return an object with the same shape.
Must return a DataFrame with identical index and
column labels when ``axis=None``
axis : int, str or None
apply to each column (``axis=0`` or ``'index'``)
or to each row (``axis=1`` or ``'columns'``) or
to the entire DataFrame at once with ``axis=None``
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
Notes
-----
The output shape of ``func`` should match the input, i.e. if
``x`` is the input row, column, or table (depending on ``axis``),
then ``func(x).shape == x.shape`` should be true.
This is similar to ``DataFrame.apply``, except that ``axis=None``
applies the function to the entire DataFrame at once,
rather than column-wise or row-wise.
Examples
--------
>>> def highlight_max(x):
...     return ['background-color: yellow' if v == x.max() else ''
...             for v in x]
...
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.apply(highlight_max)
"""
self._todo.append((lambda instance: getattr(instance, '_apply'),
(func, axis, subset), kwargs))
return self
def _applymap(self, func, subset=None, **kwargs):
func = partial(func, **kwargs) # applymap doesn't take kwargs?
if subset is None:
subset = pd.IndexSlice[:]
subset = _non_reducing_slice(subset)
result = self.data.loc[subset].applymap(func)
self._update_ctx(result)
return self
def applymap(self, func, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with the result.
.. versionadded:: 0.17.1
Parameters
----------
func : function
``func`` should take a scalar and return a scalar
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``func``
Returns
-------
self : Styler
See Also
--------
Styler.where
"""
self._todo.append((lambda instance: getattr(instance, '_applymap'),
(func, subset), kwargs))
return self
def where(self, cond, value, other=None, subset=None, **kwargs):
"""
Apply a function elementwise, updating the HTML
representation with a style which is selected in
accordance with the return value of a function.
.. versionadded:: 0.21.0
Parameters
----------
cond : callable
``cond`` should take a scalar and return a boolean
value : str
applied when ``cond`` returns true
other : str
applied when ``cond`` returns false
subset : IndexSlice
a valid indexer to limit ``data`` to *before* applying the
function. Consider using a pandas.IndexSlice
kwargs : dict
pass along to ``cond``
Returns
-------
self : Styler
See Also
--------
Styler.applymap
"""
if other is None:
other = ''
return self.applymap(lambda val: value if cond(val) else other,
subset=subset, **kwargs)
def set_precision(self, precision):
"""
Set the precision used to render.
.. versionadded:: 0.17.1
Parameters
----------
precision: int
Returns
-------
self : Styler
"""
self.precision = precision
return self
def set_table_attributes(self, attributes):
"""
Set the table attributes. These are the items
that show up in the opening ``<table>`` tag in addition
to the automatic (by default) id.
.. versionadded:: 0.17.1
Parameters
----------
attributes : string
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_attributes('class="pure-table"')
# ... <table class="pure-table"> ...
"""
self.table_attributes = attributes
return self
def export(self):
"""
Export the styles applied to the current Styler.
Can be applied to a second style with ``Styler.use``.
.. versionadded:: 0.17.1
Returns
-------
styles: list
See Also
--------
Styler.use
"""
return self._todo
def use(self, styles):
"""
Set the styles on the current Styler, possibly using styles
from ``Styler.export``.
.. versionadded:: 0.17.1
Parameters
----------
styles: list
list of style functions
Returns
-------
self : Styler
See Also
--------
Styler.export
"""
self._todo.extend(styles)
return self
def set_uuid(self, uuid):
"""
Set the uuid for a Styler.
.. versionadded:: 0.17.1
Parameters
----------
uuid: str
Returns
-------
self : Styler
"""
self.uuid = uuid
return self
def set_caption(self, caption):
"""
Set the caption on a Styler.
.. versionadded:: 0.17.1
Parameters
----------
caption: str
Returns
-------
self : Styler
"""
self.caption = caption
return self
def set_table_styles(self, table_styles):
"""
Set the table styles on a Styler. These are placed in a
``<style>`` tag before the generated HTML table.
.. versionadded:: 0.17.1
Parameters
----------
table_styles: list
Each individual table_style should be a dictionary with
``selector`` and ``props`` keys. ``selector`` should be a CSS
selector that the style will be applied to (automatically
prefixed by the table's UUID) and ``props`` should be a list of
tuples with ``(attribute, value)``.
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_table_styles(
... [{'selector': 'tr:hover',
... 'props': [('background-color', 'yellow')]}]
... )
"""
self.table_styles = table_styles
return self
# -----------------------------------------------------------------------
# A collection of "builtin" styles
# -----------------------------------------------------------------------
@staticmethod
def _highlight_null(v, null_color):
return ('background-color: {color}'.format(color=null_color)
if pd.isna(v) else '')
def highlight_null(self, null_color='red'):
"""
Shade the background ``null_color`` for missing values.
.. versionadded:: 0.17.1
Parameters
----------
null_color: str
Returns
-------
self : Styler
"""
self.applymap(self._highlight_null, null_color=null_color)
return self
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0,
subset=None):
"""
Color the background in a gradient according to
the data in each column (optionally row).
Requires matplotlib.
.. versionadded:: 0.17.1
Parameters
----------
cmap: str or colormap
matplotlib colormap
low, high: float
compress the range by these values.
axis: int or str
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
Returns
-------
self : Styler
Notes
-----
Tune ``low`` and ``high`` to keep the text legible by
not using the entire range of the color map. These extend
the range of the data by ``low * (x.max() - x.min())``
and ``high * (x.max() - x.min())`` before normalizing.
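Examples
--------
A minimal sketch (requires matplotlib; data is random):
>>> df = pd.DataFrame(np.random.randn(5, 3))
>>> df.style.background_gradient(cmap='viridis', low=0.1, high=0.1)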
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
self.apply(self._background_gradient, cmap=cmap, subset=subset,
axis=axis, low=low, high=high)
return self
@staticmethod
def _background_gradient(s, cmap='PuBu', low=0, high=0):
"""Color background in a range according to the data."""
with _mpl(Styler.background_gradient) as (plt, colors):
rng = s.max() - s.min()
# extend lower / upper bounds, compresses color range
norm = colors.Normalize(s.min() - (rng * low),
s.max() + (rng * high))
# matplotlib modifies inplace?
# https://github.com/matplotlib/matplotlib/issues/5427
normed = norm(s.values)
c = [colors.rgb2hex(x) for x in plt.cm.get_cmap(cmap)(normed)]
return ['background-color: {color}'.format(color=color)
for color in c]
def set_properties(self, subset=None, **kwargs):
"""
Convenience method for setting one or more non-data dependent
properties for each cell.
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice
a valid slice for ``data`` to limit the style application to
kwargs: dict
property: value pairs to be set for each cell
Returns
-------
self : Styler
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 4))
>>> df.style.set_properties(color="white", align="right")
>>> df.style.set_properties(**{'background-color': 'yellow'})
"""
values = ';'.join('{p}: {v}'.format(p=p, v=v)
for p, v in kwargs.items())
f = lambda x: values
return self.applymap(f, subset=subset)
@staticmethod
def _bar_left(s, color, width, base):
"""
The minimum value is aligned at the left of the cell
Parameters
----------
color: 2-tuple/list, of [``color_negative``, ``color_positive``]
width: float
A number between 0 and 100. The largest value will cover ``width``
percent of the cell's width
base: str
The base css format of the cell, e.g.:
``base = 'width: 10em; height: 80%;'``
Returns
-------
self : Styler
"""
normed = width * (s - s.min()) / (s.max() - s.min())
zero_normed = width * (0 - s.min()) / (s.max() - s.min())
attrs = (base + 'background: linear-gradient(90deg,{c} {w:.1f}%, '
'transparent 0%)')
return [base if x == 0 else attrs.format(c=color[0], w=x)
if x < zero_normed
else attrs.format(c=color[1], w=x) if x >= zero_normed
else base for x in normed]
@staticmethod
def _bar_center_zero(s, color, width, base):
"""
Creates a bar chart where the zero is centered in the cell
Parameters
----------
color: 2-tuple/list, of [``color_negative``, ``color_positive``]
width: float
A number between 0 and 100. The largest value will cover ``width``
percent of the cell's width
base: str
The base css format of the cell, e.g.:
``base = 'width: 10em; height: 80%;'``
Returns
-------
self : Styler
"""
# Either the min or the max should reach the edge
# (50%, centered on zero)
m = max(abs(s.min()), abs(s.max()))
normed = s * 50 * width / (100.0 * m)
attrs_neg = (base + 'background: linear-gradient(90deg, transparent 0%'
', transparent {w:.1f}%, {c} {w:.1f}%, '
'{c} 50%, transparent 50%)')
attrs_pos = (base + 'background: linear-gradient(90deg, transparent 0%'
', transparent 50%, {c} 50%, {c} {w:.1f}%, '
'transparent {w:.1f}%)')
return [attrs_pos.format(c=color[1], w=(50 + x)) if x >= 0
else attrs_neg.format(c=color[0], w=(50 + x))
for x in normed]
@staticmethod
def _bar_center_mid(s, color, width, base):
"""
Creates a bar chart where the midpoint is centered in the cell
Parameters
----------
color: 2-tuple/list, of [``color_negative``, ``color_positive``]
width: float
A number between 0 and 100. The largest value will cover ``width``
percent of the cell's width
base: str
The base css format of the cell, e.g.:
``base = 'width: 10em; height: 80%;'``
Returns
-------
self : Styler
"""
if s.min() >= 0:
# In this case, we place the zero at the left, and the max() should
# be at width
zero = 0.0
slope = width / s.max()
elif s.max() <= 0:
# In this case, we place the zero at the right, and the min()
# should be at 100-width
zero = 100.0
slope = width / -s.min()
else:
slope = width / (s.max() - s.min())
zero = (100.0 + width) / 2.0 - slope * s.max()
normed = zero + slope * s
attrs_neg = (base + 'background: linear-gradient(90deg, transparent 0%'
', transparent {w:.1f}%, {c} {w:.1f}%, '
'{c} {zero:.1f}%, transparent {zero:.1f}%)')
attrs_pos = (base + 'background: linear-gradient(90deg, transparent 0%'
', transparent {zero:.1f}%, {c} {zero:.1f}%, '
'{c} {w:.1f}%, transparent {w:.1f}%)')
return [attrs_pos.format(c=color[1], zero=zero, w=x) if x > zero
else attrs_neg.format(c=color[0], zero=zero, w=x)
for x in normed]
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
align='left'):
"""
Color the background ``color`` proportional to the values in each column.
Excludes non-numeric data by default.
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
axis: int
color: str or 2-tuple/list
If a str is passed, the color is the same for both
negative and positive numbers. If 2-tuple/list is used, the
first element is the color_negative and the second is the
color_positive (eg: ['#d65f5f', '#5fba7d'])
width: float
A number between 0 and 100. The largest value will cover ``width``
percent of the cell's width
align : {'left', 'zero', 'mid'}, default 'left'
- 'left' : the min value starts at the left of the cell
- 'zero' : a value of zero is located at the center of the cell
- 'mid' : the center of the cell is at (max-min)/2, or
if values are all negative (positive) the zero is aligned
at the right (left) of the cell
.. versionadded:: 0.20.0
Returns
-------
self : Styler
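Examples
--------
An illustrative use on a column of mixed-sign values:
>>> df = pd.DataFrame({'A': [-2, -1, 0, 1, 2]})
>>> df.style.bar(align='mid', color=['#d65f5f', '#5fba7d'])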
"""
subset = _maybe_numeric_slice(self.data, subset)
subset = _non_reducing_slice(subset)
base = 'width: 10em; height: 80%;'
if not(is_list_like(color)):
color = [color, color]
elif len(color) == 1:
color = [color[0], color[0]]
elif len(color) > 2:
msg = ("Must pass `color` as string or a list-like"
" of length 2: [`color_negative`, `color_positive`]\n"
"(eg: color=['#d65f5f', '#5fba7d'])")
raise ValueError(msg)
if align == 'left':
self.apply(self._bar_left, subset=subset, axis=axis, color=color,
width=width, base=base)
elif align == 'zero':
self.apply(self._bar_center_zero, subset=subset, axis=axis,
color=color, width=width, base=base)
elif align == 'mid':
self.apply(self._bar_center_mid, subset=subset, axis=axis,
color=color, width=width, base=base)
else:
msg = ("`align` must be one of {'left', 'zero', 'mid'}")
raise ValueError(msg)
return self
def highlight_max(self, subset=None, color='yellow', axis=0):
"""
Highlight the maximum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
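Examples
--------
A brief illustration on random data:
>>> df = pd.DataFrame(np.random.randn(5, 2))
>>> df.style.highlight_max(axis=None)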
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=True)
def highlight_min(self, subset=None, color='yellow', axis=0):
"""
Highlight the minimum by shading the background
.. versionadded:: 0.17.1
Parameters
----------
subset: IndexSlice, default None
a valid slice for ``data`` to limit the style application to
color: str, default 'yellow'
axis: int, str, or None; default 0
0 or 'index' for columnwise (default), 1 or 'columns' for rowwise,
or ``None`` for tablewise
Returns
-------
self : Styler
"""
return self._highlight_handler(subset=subset, color=color, axis=axis,
max_=False)
def _highlight_handler(self, subset=None, color='yellow', axis=None,
max_=True):
subset = _non_reducing_slice(_maybe_numeric_slice(self.data, subset))
self.apply(self._highlight_extrema, color=color, axis=axis,
subset=subset, max_=max_)
return self
@staticmethod
def _highlight_extrema(data, color='yellow', max_=True):
"""Highlight the min or max in a Series or DataFrame"""
attr = 'background-color: {0}'.format(color)
if data.ndim == 1: # Series from .apply
if max_:
extrema = data == data.max()
else:
extrema = data == data.min()
return [attr if v else '' for v in extrema]
else: # DataFrame from .apply(axis=None)
if max_:
extrema = data == data.max().max()
else:
extrema = data == data.min().min()
return pd.DataFrame(np.where(extrema, attr, ''),
index=data.index, columns=data.columns)
@classmethod
def from_custom_template(cls, searchpath, name):
"""
Factory function for creating a subclass of ``Styler``
with a custom template and Jinja environment.
Parameters
----------
searchpath : str or list
Path or paths of directories containing the templates
name : str
Name of your custom template to use for rendering
Returns
-------
MyStyler : subclass of Styler
has the correct ``env`` and ``template`` class attributes set.
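Examples
--------
A sketch with a hypothetical template directory and file name:
>>> EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
>>> EasyStyler(df)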
"""
loader = ChoiceLoader([
FileSystemLoader(searchpath),
cls.loader,
])
class MyStyler(cls):
env = Environment(loader=loader)
template = env.get_template(name)
return MyStyler
def _is_visible(idx_row, idx_col, lengths):
"""
Index -> {(idx_row, idx_col): bool}
"""
return (idx_col, idx_row) in lengths
def _get_level_lengths(index):
"""
Given an index, find the level length for each element.
Result is a dictionary of (level, initial_position): span
"""
sentinel = sentinel_factory()
levels = index.format(sparsify=sentinel, adjoin=False, names=False)
if index.nlevels == 1:
return {(0, i): 1 for i, value in enumerate(levels)}
lengths = {}
for i, lvl in enumerate(levels):
for j, row in enumerate(lvl):
if not get_option('display.multi_sparse'):
lengths[(i, j)] = 1
elif row != sentinel:
last_label = j
lengths[(i, last_label)] = 1
else:
lengths[(i, last_label)] += 1
return lengths
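# Illustrative span computation (assumes the default display.multi_sparse option):
#   idx = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1, 2, 1]])
#   _get_level_lengths(idx) -> {(0, 0): 2, (0, 2): 1, (1, 0): 1, (1, 1): 1, (1, 2): 1}
# i.e. the outer label 'a' spans two rows and every other label spans one.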
def _maybe_wrap_formatter(formatter):
if is_string_like(formatter):
return lambda x: formatter.format(x)
elif callable(formatter):
return formatter
else:
msg = ("Expected a template string or callable, got {formatter} "
"instead".format(formatter=formatter))
raise TypeError(msg)
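# Illustrative behaviour of _maybe_wrap_formatter (values are hypothetical):
#   _maybe_wrap_formatter("{:.2f}")(3.14159) -> '3.14'
#   _maybe_wrap_formatter(str.upper)("abc")  -> 'ABC'
#   _maybe_wrap_formatter(42)                -> raises TypeError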
|
mit
|
knutfrode/opendrift
|
examples/example_entrainment_rate.py
|
1
|
2670
|
#!/usr/bin/env python
from datetime import datetime, timedelta
from opendrift.models.openoil3D import OpenOil3D
import matplotlib.pyplot as plt
####################################################
# Tkalich & Chan (2002) entrainment rate
####################################################
o = OpenOil3D(loglevel=0, weathering_model='noaa')
o.fallback_values['land_binary_mask'] = 0
o.fallback_values['x_sea_water_velocity'] = -.2
o.fallback_values['y_sea_water_velocity'] = 0
o.fallback_values['x_wind'] = 12
o.fallback_values['y_wind'] = 0
o.fallback_values['sea_surface_wave_stokes_drift_x_velocity'] = .3
o.fallback_values['sea_surface_wave_stokes_drift_y_velocity'] = 0
o.set_config('wave_entrainment:entrainment_rate', 'Tkalich & Chan (2002)')
o.set_config('wave_entrainment:droplet_size_distribution', 'Johansen et al. (2015)')
o.set_config('processes:evaporation', False)
o.set_config('processes:dispersion', False)
o.set_config('turbulentmixing:droplet_diameter_min_wavebreaking', 1e-6)
o.set_config('turbulentmixing:droplet_diameter_max_wavebreaking', 1e-3)
o.seed_elements(lon=4, lat=60, time=datetime.now(), number=1000,
radius=100, z=0, oiltype='VILJE')
o.run(duration=timedelta(hours=24), time_step=900)
######################################################
# Li et al. (2017) entrainment rate
######################################################
o2 = OpenOil3D(loglevel=0, weathering_model='noaa')
o2.fallback_values['land_binary_mask'] = 0
o2.fallback_values['x_sea_water_velocity'] = -.2
o2.fallback_values['y_sea_water_velocity'] = 0
o2.fallback_values['x_wind'] = 12
o2.fallback_values['y_wind'] = 0
o2.fallback_values['sea_surface_wave_stokes_drift_x_velocity'] = .3
o2.fallback_values['sea_surface_wave_stokes_drift_y_velocity'] = 0
o2.set_config('wave_entrainment:entrainment_rate', 'Li et al. (2017)')
o2.set_config('wave_entrainment:droplet_size_distribution', 'Johansen et al. (2015)')
o2.set_config('processes:evaporation', False)
o2.set_config('processes:dispersion', False)
o2.set_config('turbulentmixing:droplet_diameter_min_wavebreaking', 1e-6)
o2.set_config('turbulentmixing:droplet_diameter_max_wavebreaking', 1e-3)
o2.seed_elements(lon=4, lat=60, time=datetime.now(), number=1000,
radius=100, z=0, oiltype='VILJE')
o2.run(duration=timedelta(hours=24), time_step=900)
###########################
# Plotting and comparing
###########################
o.plot_vertical_distribution()
o2.plot_vertical_distribution()
o.plot_oil_budget()
o2.plot_oil_budget()
legend = ['Tkalich & Chan (2002)', 'Li et al. (2017)']
o.animation_profile(compare=o2, legend=legend)
o.animation(compare=o2, legend=legend)
|
gpl-2.0
|
potash/scikit-learn
|
benchmarks/bench_plot_nmf.py
|
24
|
5742
|
"""
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
"""
W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random')
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
W : 2-ndarray, [n_samples, r]
Component part of the factorization
H : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
"""
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
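# Minimal illustrative call (synthetic non-negative data; shapes are assumptions):
#   V = np.abs(np.random.RandomState(0).randn(20, 10))
#   W, H = alt_nnmf(V, r=3, tol=1e-3, init='random')
#   W.shape -> (20, 3), H.shape -> (3, 10), and np.dot(W, H) approximates V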
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
' benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plots do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
|
bsd-3-clause
|
piosz/test-infra
|
mungegithub/issue_labeler/simple_app.py
|
2
|
5103
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from logging.handlers import RotatingFileHandler
import numpy as np # pylint: disable=import-error
from flask import Flask, request # pylint: disable=import-error
from sklearn.feature_extraction import FeatureHasher # pylint: disable=import-error
from sklearn.externals import joblib # pylint: disable=import-error
from sklearn.linear_model import SGDClassifier # pylint: disable=import-error
from nltk.tokenize import RegexpTokenizer # pylint: disable=import-error
from nltk.stem.porter import PorterStemmer # pylint: disable=import-error
APP = Flask(__name__)
# parameters
TEAM_FN = './models/trained_teams_model.pkl'
COMPONENT_FN = './models/trained_components_model.pkl'
LOG_FILE = '/tmp/issue-labeler.log'
LOG_SIZE = 1024*1024*100
NUM_FEATURES = 262144
MY_LOSS = 'hinge'
MY_ALPHA = .1
MY_PENALTY = 'l2'
MY_HASHER = FeatureHasher(input_type='string', n_features=NUM_FEATURES, non_negative=True)
MY_STEMMER = PorterStemmer()
TOKENIZER = RegexpTokenizer(r'\w+')
STOPWORDS = []
try:
if not STOPWORDS:
STOPWORDS_FILENAME = './stopwords.txt'
with open(STOPWORDS_FILENAME, 'r') as fp:
STOPWORDS = list([word.strip() for word in fp])
except: # pylint:disable=bare-except
# don't remove any stopwords
STOPWORDS = []
@APP.errorhandler(500)
def internal_error(exception):
return str(exception), 500
@APP.route("/", methods=['POST'])
def get_labels():
"""
The request should contain 2 form-urlencoded parameters
1) title : title of the issue
2) body: body of the issue
It returns a team/<label> and a component/<label>
"""
title = request.form.get('title', '')
body = request.form.get('body', '')
tokens = tokenize_stem_stop(" ".join([title, body]))
team_mod = joblib.load(TEAM_FN)
comp_mod = joblib.load(COMPONENT_FN)
vec = MY_HASHER.transform([tokens])
tlabel = team_mod.predict(vec)[0]
clabel = comp_mod.predict(vec)[0]
return ",".join([tlabel, clabel])
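# Illustrative request against a locally running instance (host/port assumed,
# and trained model files must already exist at TEAM_FN / COMPONENT_FN):
#   curl -X POST -d 'title=scheduler stuck' -d 'body=pods pending forever' \
#        http://localhost:5000/
# The response is a comma-separated pair such as "team/<label>,component/<label>".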
def tokenize_stem_stop(input_string):
input_string = input_string.encode('utf-8')
cur_title_body = TOKENIZER.tokenize(input_string.decode('utf-8').lower())
return [MY_STEMMER.stem(x) for x in cur_title_body if x not in STOPWORDS]
@APP.route("/update_models", methods=['PUT'])
def update_model(): # pylint: disable=too-many-locals
"""
data should contain three fields
titles: list of titles
bodies: list of bodies
labels: list of list of labels
"""
data = request.json
titles = data.get('titles')
bodies = data.get('bodies')
labels = data.get('labels')
t_tokens = []
c_tokens = []
team_labels = []
component_labels = []
for (title, body, label_list) in zip(titles, bodies, labels):
t_label = [x for x in label_list if x.startswith('team')]
c_label = [x for x in label_list if x.startswith('component')]
tokens = tokenize_stem_stop(" ".join([title, body]))
if t_label:
team_labels += t_label
t_tokens += [tokens]
if c_label:
component_labels += c_label
c_tokens += [tokens]
t_vec = MY_HASHER.transform(t_tokens)
c_vec = MY_HASHER.transform(c_tokens)
if team_labels:
if os.path.isfile(TEAM_FN):
team_model = joblib.load(TEAM_FN)
team_model.partial_fit(t_vec, np.array(team_labels))
else:
# no team model stored so build a new one
team_model = SGDClassifier(loss=MY_LOSS, penalty=MY_PENALTY, alpha=MY_ALPHA)
team_model.fit(t_vec, np.array(team_labels))
# dump only a model that was actually (re)trained above; otherwise an
# empty label list would leave the variable undefined and raise NameError
joblib.dump(team_model, TEAM_FN)
if component_labels:
if os.path.isfile(COMPONENT_FN):
component_model = joblib.load(COMPONENT_FN)
component_model.partial_fit(c_vec, np.array(component_labels))
else:
# no comp model stored so build a new one
component_model = SGDClassifier(loss=MY_LOSS, penalty=MY_PENALTY, alpha=MY_ALPHA)
component_model.fit(c_vec, np.array(component_labels))
joblib.dump(component_model, COMPONENT_FN)
return ""
def configure_logger():
log_format = '%(asctime)-20s %(levelname)-10s %(message)s'
file_handler = RotatingFileHandler(LOG_FILE, maxBytes=LOG_SIZE, backupCount=3)
formatter = logging.Formatter(log_format)
file_handler.setFormatter(formatter)
APP.logger.addHandler(file_handler)
if __name__ == "__main__":
configure_logger()
APP.run(host="0.0.0.0")
|
apache-2.0
|
andreadelprete/sot-torque-control
|
python/compress_hrpsys_data.py
|
2
|
3119
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 11:36:11 2015
Compress old data in the log format of hrpsys in a format that is compatible with the new data
collected using the RealTimeTracer.
@author: adelpret
"""
from compute_estimates_from_sensors import compute_estimates_from_sensors
from load_hrpsys_log import load_hrpsys_log_astate
from load_hrpsys_log import load_hrpsys_log_rstate
import numpy as np
import matplotlib.pyplot as plt
out_data_folder = '../results/20140807-legTorqueId1/';
OUT_DATA_FILE_NAME = 'data.npz';
A_STATE_FILE = '/home/adelpret/devel/yarp_gazebo/src/motorFrictionIdentification/data/20140807-legTorqueId/legTorqueId_pos1-astate.log';
R_STATE_FILE = '/home/adelpret/devel/yarp_gazebo/src/motorFrictionIdentification/data/20140807-legTorqueId/legTorqueId_pos1-rstate.log';
ESTIMATION_DELAY = 0.2;
COMPUTE_TORQUES_WITHOUT_GYRO = False;
sensors = load_hrpsys_log_astate(A_STATE_FILE, 'rad');
ref = load_hrpsys_log_rstate(R_STATE_FILE, 'rad');
#sensors = sensors[:5000];
#ref = ref[:5000];
(torques, dq, ddq) = compute_estimates_from_sensors(sensors, ESTIMATION_DELAY);
if(COMPUTE_TORQUES_WITHOUT_GYRO):
sensors['gyro'] = np.zeros(sensors['gyro'].shape);
(torques_no_gyro, dq, ddq) = compute_estimates_from_sensors(sensors, ESTIMATION_DELAY);
for i in range(12): #torques.shape[1]):
print "Plot data joint %d out of %d" % (i, torques.shape[1]);
f, ax = plt.subplots(1, 1, sharex=True);
ax.plot(sensors['time'], sensors['torque'][:,i], 'b');
delta_q = ref['enc'][:,i]-sensors['enc'][:,i];
scale = np.mean(sensors['torque'][:,i])/np.mean(delta_q);
ax.plot(sensors['time'], scale*delta_q, 'r--');
ax.plot(sensors['time'], torques[:,i], 'g--')
ax.legend(['hrpsys','delta_q','torque']);
ax.set_title('torque hrpsys');
f, ax = plt.subplots(3, 1, sharex=True);
ax[0].plot(sensors['time'], torques[:,i]);
if(COMPUTE_TORQUES_WITHOUT_GYRO):
ax[0].plot(sensors['time'], torques_no_gyro[:,i]);
ax[0].set_title('Torque joint '+str(i));
ax[1].plot(sensors['time'], sensors['enc'][:,i]);
ax[1].plot(sensors['time'], sensors['enc'][:,i] - ref['enc'][:,i]);
ax[1].set_title('Angle joint '+str(i));
ax[2].plot(sensors['time'], dq[:,i]);
ax[2].set_title('Velocity joint '+str(i));
ax[1].legend(['Angle', 'Delta_q']);
if(COMPUTE_TORQUES_WITHOUT_GYRO):
ax[0].legend(['Torque w/ gyro', 'Torque w/o gyro']);
plt.show();
DT = float(sensors['time'][1] - sensors['time'][0]); # sampling period
LOST_SAMPLES = int(ESTIMATION_DELAY/DT);
print "Gonna shift data of %d samples to compensate for estimation delay" % LOST_SAMPLES;
if(COMPUTE_TORQUES_WITHOUT_GYRO):
np.savez(out_data_folder+OUT_DATA_FILE_NAME, dq=dq[:-LOST_SAMPLES,:], tau=torques_no_gyro[:-LOST_SAMPLES,:],
qDes=ref['enc'][LOST_SAMPLES:,:30], enc=sensors['enc'][LOST_SAMPLES:,:30]);
else:
np.savez(out_data_folder+OUT_DATA_FILE_NAME, dq=dq[:-LOST_SAMPLES,:], tau=torques[:-LOST_SAMPLES,:],
qDes=ref['enc'][LOST_SAMPLES:,:30], enc=sensors['enc'][LOST_SAMPLES:,:30]);
|
lgpl-3.0
|
pnedunuri/scikit-learn
|
examples/text/mlcomp_sparse_document_classification.py
|
292
|
4498
|
"""
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how the scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
|
bsd-3-clause
|
waterponey/scikit-learn
|
sklearn/ensemble/voting_classifier.py
|
21
|
9858
|
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..utils.validation import has_fit_parameter, check_is_fitted
def _parallel_fit_estimator(estimator, X, y, sample_weight):
"""Private function used to fit an estimator within a job."""
if sample_weight is not None:
estimator.fit(X, y, sample_weight)
else:
estimator.fit(X, y)
return estimator
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for ``fit``.
If -1, then the number of jobs is set to the number of cores.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array-like, shape = [n_predictions]
The class labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None, n_jobs=1):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
if sample_weight is not None:
for name, step in self.estimators:
if not has_fit_parameter(step, 'sample_weight'):
raise ValueError('Underlying estimator \'%s\' does not support'
' sample weights.' % name)
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
transformed_y = self.le_.transform(y)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y,
sample_weight)
for _, clf in self.estimators)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict_proba calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
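# Illustrative use of the flattened parameter names exposed by get_params
# (estimator names 'lr'/'rf' are borrowed from the class docstring example):
#   eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)])
#   'lr__C' in eclf.get_params(deep=True)   # True, so GridSearchCV can tune it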
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
|
bsd-3-clause
|
florentchandelier/zipline
|
zipline/finance/risk/period.py
|
3
|
7440
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
from six import iteritems
import numpy as np
import pandas as pd
from . import risk
from . risk import check_entry
from empyrical import (
alpha_beta_aligned,
annual_volatility,
cum_returns,
downside_risk,
max_drawdown,
sharpe_ratio,
sortino_ratio
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_session, end_session, returns, trading_calendar,
treasury_curves, benchmark_returns, algorithm_leverages=None):
if treasury_curves.index[-1] >= start_session:
mask = ((treasury_curves.index >= start_session) &
(treasury_curves.index <= end_session))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self._start_session = start_session
self._end_session = end_session
self.trading_calendar = trading_calendar
trading_sessions = trading_calendar.sessions_in_range(
self._start_session,
self._end_session,
)
self.algorithm_returns = self.mask_returns_to_period(returns,
trading_sessions)
# Benchmark needs to be masked to the same dates as the algo returns
self.benchmark_returns = self.mask_returns_to_period(
benchmark_returns,
self.algorithm_returns.index
)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
cum_returns(self.benchmark_returns).iloc[-1]
self.algorithm_period_returns = \
cum_returns(self.algorithm_returns).iloc[-1]
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self._start_session,
end=self._end_session
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.mean_algorithm_returns = (
self.algorithm_returns.cumsum() /
np.arange(1, self.num_trading_days + 1, dtype=np.float64)
)
self.benchmark_volatility = annual_volatility(self.benchmark_returns)
self.algorithm_volatility = annual_volatility(self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self._start_session,
self._end_session,
self.trading_calendar,
)
self.sharpe = sharpe_ratio(
self.algorithm_returns,
)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.downside_risk = downside_risk(
self.algorithm_returns.values
)
self.sortino = sortino_ratio(
self.algorithm_returns.values,
_downside_risk=self.downside_risk,
)
self.alpha, self.beta = alpha_beta_aligned(
self.algorithm_returns.values,
self.benchmark_returns.values,
)
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = max_drawdown(self.algorithm_returns.values)
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict keyed by 'trading_days', 'benchmark_volatility',
'algo_volatility', 'treasury_period_return', 'algorithm_period_return',
'benchmark_period_return', 'sharpe', 'sortino', 'beta', 'alpha',
'excess_return', 'max_drawdown', 'max_leverage' and 'period_label'.
"""
period_label = self._end_session.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns, trading_days):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_day_mask = returns.index.normalize().isin(trading_days)
mask = ((returns.index >= self._start_session) &
(returns.index <= self._end_session) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
|
apache-2.0
|
WaysonKong/blog
|
scikit-learn/glm.py
|
2
|
1464
|
from sklearn import linear_model
feature = [[0, 0], [1, 1], [2, 2]]
label = [0, 1, 2]
def get_model_msg(model_name):
return "----" + model_name + "----\n"
# linear model
print get_model_msg("linear model")
reg = linear_model.LinearRegression()
## 1. train
reg.fit(feature, label)
print reg.coef_
## 2. predict
print reg.predict([[3, 3], [4, 4]])
# SVM
print get_model_msg("SVM")
from sklearn import svm
X = [[0, 0], [1, 1]]
y = [0, 1]
clf = svm.SVC()
clf.fit(X, y)
print clf.predict([[2.,2.]])
# Naive Bayes
print get_model_msg("Naive Bayes")
from sklearn import datasets
iris = datasets.load_iris()
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred = gnb.fit(iris.data, iris.target).predict(iris.data)
print ("all=%d, mislabeled=%d" % (iris.data.shape[0], (iris.target != y_pred).sum() ) )
# CV
print get_model_msg("CV method")
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
iris = datasets.load_iris()
print iris.data.shape, iris.target.shape
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size = 0.3, random_state = 0)
print X_train.shape, y_train.shape
print X_test.shape, y_test.shape
clf = svm.SVC(kernel = 'linear', C = 1 )
print clf.fit(X_train, y_train).score(X_test, y_test)
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, iris.data, iris.target, cv = 5)
print scores
|
gpl-3.0
|
kklmn/xrt
|
examples/withRaycing/15_XRD/xrd_mono.py
|
1
|
7121
|
# -*- coding: utf-8 -*-
"""
__author__ = "Roman Chernikov", "Konstantin Klementiev"
__date__ = "2018-10-01"
Created with xrtQook
Single Crystal Diffraction
--------------------------
Sample script to calculate the Single Crystal Laue Diffraction pattern.
"""
import sys
import os
sys.path.append(os.path.join('..', '..', '..'))
import matplotlib as mpl # analysis:ignore
from matplotlib import pyplot as plt # analysis:ignore
import xrt.backends.raycing.sources as rsources # analysis:ignore
import xrt.backends.raycing.screens as rscreens # analysis:ignore
import xrt.backends.raycing.materials as rmats # analysis:ignore
import xrt.backends.raycing.oes as roes # analysis:ignore
import xrt.backends.raycing.apertures as rapts # analysis:ignore
import xrt.backends.raycing.run as rrun # analysis:ignore
import xrt.backends.raycing as raycing # analysis:ignore
import xrt.plotter as xrtplot # analysis:ignore
import xrt.runner as xrtrun # analysis:ignore
PowderSample = rmats.Powder(
chi=[0, 6.283185307179586],
name='CeO2 powder',
hkl=[5, 5, 5],
a=5.256,
atoms=[58, 58, 58, 58, 8, 8, 8, 8, 8, 8, 8, 8],
atomsXYZ=[[0.0, 0.0, 0.0], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5],
[0.5, 0.5, 0.0], [0.25, 0.25, 0.25], [0.25, 0.75, 0.75],
[0.75, 0.25, 0.75], [0.75, 0.75, 0.25], [0.75, 0.75, 0.75],
[0.75, 0.25, 0.25], [0.25, 0.75, 0.25], [0.25, 0.25, 0.75]],
t=1.0,
table=r"Chantler total")
MonoCrystalSample = rmats.MonoCrystal(
Nmax=5, # from [-5, -5, -5] to [5, 5, 5]. Updated in plot_generator
name='Silicon',
geom='Laue reflected',
hkl=[0, 0, 1],
a=5.41949,
t=0.1,
table=r"Chantler total")
powder = False
dSize = 150
detLimits = [-dSize, dSize]
def build_beamline():
P02_2 = raycing.BeamLine()
P02_2.Undulator01 = rsources.GeometricSource(
bl=P02_2,
nrays=5e4,
name='source',
polarization='horizontal',
dx=0.5,
dz=0.5,
dxprime=0.005e-3,
dzprime=0.005e-3,
distx='normal',
distz='normal',
energies=(60000, 60) if powder else (1000, 100000),
distE='normal' if powder else 'flat')
P02_2.FSM_Source = rscreens.Screen(
bl=P02_2,
name=r"FSM_Source",
center=[0, 29001, 0])
P02_2.Sample = roes.LauePlate(
bl=P02_2,
name=r"CeO2 Powder Sample" if powder else "Silicon 001 wafer",
center=[0, 65000, 0],
pitch='90deg',
yaw=0 if powder else '45deg',
rotationSequence='RxRyRz',
material=PowderSample if powder else MonoCrystalSample,
targetOpenCL=(0, 0),
precisionOpenCL='float32')
P02_2.FSM_Sample = rscreens.Screen(
bl=P02_2,
name=r"After Sample",
center=[0, 65100, 0])
P02_2.RoundBeamStop01 = rapts.RoundBeamStop(
bl=P02_2,
name=r"BeamStop",
center=[0, 65149, 0],
r=5)
P02_2.Frame = rapts.RectangularAperture(
bl=P02_2,
name=r"Frame",
center=[0, 65149.5, 0],
opening=[-dSize, dSize, -dSize, dSize])
P02_2.FSM_Detector = rscreens.Screen(
bl=P02_2,
name=r"Detector",
center=[0, 65150, 0])
return P02_2
def run_process(P02_2):
Undulator01beamGlobal01 = P02_2.Undulator01.shine(
withAmplitudes=False)
FSM_SourcebeamLocal01 = P02_2.FSM_Source.expose(
beam=Undulator01beamGlobal01)
SamplebeamGlobal01, SamplebeamLocal01 = P02_2.Sample.reflect(
beam=Undulator01beamGlobal01)
RoundBeamStop01beamLocal01 = P02_2.RoundBeamStop01.propagate(
beam=SamplebeamGlobal01)
Frame01beamLocal01 = P02_2.Frame.propagate( # analysis:ignore
beam=SamplebeamGlobal01)
FSM_DetectorbeamLocal01 = P02_2.FSM_Detector.expose(
beam=SamplebeamGlobal01)
outDict = {
'Undulator01beamGlobal01': Undulator01beamGlobal01,
'FSM_SourcebeamLocal01': FSM_SourcebeamLocal01,
'SamplebeamGlobal01': SamplebeamGlobal01,
'SamplebeamLocal01': SamplebeamLocal01,
'RoundBeamStop01beamLocal01': RoundBeamStop01beamLocal01,
'FSM_DetectorbeamLocal01': FSM_DetectorbeamLocal01}
return outDict
rrun.run_process = run_process
def define_plots():
plots = []
Plot01 = xrtplot.XYCPlot(
beam=r"FSM_SourcebeamLocal01",
xaxis=xrtplot.XYCAxis(
label=r"x",
bins=256,
ppb=1,
fwhmFormatStr=r"%.2f"),
yaxis=xrtplot.XYCAxis(
label=r"z",
bins=256,
ppb=1,
fwhmFormatStr=r"%.2f"),
caxis=xrtplot.XYCAxis(
label=r"energy",
unit=r"eV",
bins=256,
ppb=1,
fwhmFormatStr=r"%.2f"),
title=r"01 - Undulator Beam at 29m",
fluxFormatStr=r"%g",
saveName=r"01 - Undulator Beam at 29m.png")
plots.append(Plot01)
Plot03 = xrtplot.XYCPlot(
beam=r"FSM_DetectorbeamLocal01",
xaxis=xrtplot.XYCAxis(
label=r"x",
limits=detLimits,
bins=512,
ppb=1,
fwhmFormatStr=r"%.2f"),
yaxis=xrtplot.XYCAxis(
label=r"z",
limits=detLimits,
bins=512,
ppb=1,
fwhmFormatStr=r"%.2f"),
caxis=xrtplot.XYCAxis(
label=r"energy",
unit=r"eV",
bins=512,
ppb=1,
fwhmFormatStr=r"%.2f"),
title=r"03 - Detector",
fluxFormatStr=r"%g"
)
plots.append(Plot03)
return plots
def plot_generator(beamLine, plots):
for n in [5]:
if powder:
beamLine.Sample.material.hkl = [n, n, n]
else:
beamLine.Sample.material.Nmax = n + 1
plots[-1].title = "03 - Detector. Nmax={}".format(n+1)
plots[-1].saveName = "03 - Detector Nmax {}.png".format(n+1)
# plots[-1].persistentName = "03 - Detector Nmax {} fp32.mat".format(n+1) # analysis:ignore
yield
def main():
P02_2 = build_beamline()
plots = define_plots()
xrtrun.run_ray_tracing(
plots=plots,
generator=plot_generator,
generatorArgs=[P02_2, plots],
repeats=10,
# pickleEvery=5, # analysis:ignore
backend=r"raycing",
beamLine=P02_2,
afterScript=plotJet,
afterScriptArgs=[plots])
def plotJet(plots):
plt.figure(100, figsize=(12, 12))
extent = list(plots[-1].xaxis.limits)
extent.extend(plots[-1].yaxis.limits)
plt.imshow(plots[-1].total2D,
extent=extent, aspect='equal', origin='lower',
# norm=mpl.colors.LogNorm(), # uncomment this to plot in log scale # analysis:ignore
norm=mpl.colors.PowerNorm(0.22), # reduce gamma (say, to 0.3) to increase contrast # analysis:ignore
cmap='binary')
plt.xlabel(plots[-1].xaxis.displayLabel, fontsize=12)
plt.ylabel(plots[-1].yaxis.displayLabel, fontsize=12)
plt.savefig("Lauegram Intensity.png")
plt.show()
if __name__ == '__main__':
main()
|
mit
|
holygits/incubator-airflow
|
setup.py
|
1
|
9910
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import pip
import sys
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted changes
are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Git repo not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='[0-9]*', exact_match=True,
tags=True, dirty=True)
assert tag == version, (tag, version)
return '.release:{version}+{sha}'.format(version=version,
sha=sha)
except git.GitCommandError:
return '.dev0+{sha}'.format(sha=sha)
else:
return 'no_git_version'
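# Illustrative return values of git_version() (shas are hypothetical):
#   '.dev0+<sha>.dirty'         uncommitted changes in the working tree
#   '.release:{version}+<sha>'  HEAD exactly matches the release tag for `version`
#   '.dev0+<sha>'               clean commit that is not tagged as a release
#   ''                          gitpython missing or no git repo found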
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
def check_previous():
installed_packages = ([package.project_name for package
in pip.get_installed_distributions()])
if 'airflow' in installed_packages:
print("An earlier non-apache version of Airflow was installed, "
"please uninstall it first. Then reinstall.")
sys.exit(1)
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
azure = ['azure-storage>=0.34.0']
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
cgroups = [
'cgroupspy>=0.1.4',
]
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.15.2, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
emr = ['boto3>=1.0.0']
gcp_api = [
'httplib2',
'google-api-python-client>=1.5.0, <1.6.0',
'oauth2client>=2.0.2, <2.1.0',
'PyOpenSSL',
'google-cloud-dataflow',
'pandas-gbq'
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
jira = ['JIRA>1.0.7']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.7.1']
ssh = ['paramiko>=2.1.1']
salesforce = ['simple-salesforce>=0.72']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmb>=1.1.19']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8',
'kerberos>=1.2.5']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.6']
cloudant = ['cloudant>=0.5.9,<2.0'] # major update coming soon, clamp to 0.x
redis = ['redis>=2.10.5']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant
devel = [
'click',
'freezegun',
'jira',
'lxml>=3.3.4',
'mock',
'moto',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'rednose',
'paramiko'
]
devel_minreq = devel + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker + ssh
def do_setup():
check_previous()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach==2.0.0',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.11, <0.12',
'flask-admin==1.4.1',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf==0.14',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <2.9.0',
'lxml>=3.6.0, <4.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'psutil>=4.2.0, <5.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.14.2',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'tabulate>=0.7.5, <0.8.0',
'thrift>=0.9.2, <0.10',
'zope.deprecation>=4.0, <5.0',
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'azure': azure,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'kerberos': kerberos,
'ldap': ldap,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'slack': slack,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'jira': jira,
'redis': redis,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
},
)
if __name__ == "__main__":
do_setup()
|
apache-2.0
|
shoyer/xarray
|
xarray/conventions.py
|
1
|
26571
|
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
from .coding import strings, times, variables
from .coding.variables import SerializationWarning, pop_to
from .core import duck_array_ops, indexing
from .core.common import contains_cftime_datetimes
from .core.pycompat import dask_array_type
from .core.variable import IndexVariable, Variable, as_variable
class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from non-native to native endianness
This is useful for decoding arrays from netCDF3 files (which are all
big endian) into native endianness, so they can be used with Cython
functions, such as those found in bottleneck and pandas.
>>> x = np.arange(5, dtype=">i2")
>>> x.dtype
dtype('>i2')
>>> NativeEndiannessArray(x).dtype
dtype('int16')
>>> NativeEndiannessArray(x)[:].dtype
dtype('int16')
"""
__slots__ = ("array",)
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
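# Minimal numpy-only sketch of the normalisation the class above performs lazily
# (an illustration, not using the xarray indexing machinery):
#   >>> big = np.arange(3, dtype=">i2")
#   >>> big.astype(big.dtype.newbyteorder("=")).dtype.isnative
#   True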
class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
"""Decode arrays on the fly from integer to boolean datatype
This is useful for decoding boolean arrays from integer typed netCDF
variables.
>>> x = np.array([1, 0, 1, 1, 0], dtype="i1")
>>> x.dtype
dtype('int8')
>>> BoolTypeArray(x).dtype
dtype('bool')
>>> BoolTypeArray(x)[:].dtype
dtype('bool')
"""
__slots__ = ("array",)
def __init__(self, array):
self.array = indexing.as_indexable(array)
@property
def dtype(self):
return np.dtype("bool")
def __getitem__(self, key):
return np.asarray(self.array[key], dtype=self.dtype)
def _var_as_tuple(var):
return var.dims, var.data, var.attrs.copy(), var.encoding.copy()
def maybe_encode_nonstring_dtype(var, name=None):
if "dtype" in var.encoding and var.encoding["dtype"] not in ("S1", str):
dims, data, attrs, encoding = _var_as_tuple(var)
dtype = np.dtype(encoding.pop("dtype"))
if dtype != var.dtype:
if np.issubdtype(dtype, np.integer):
if (
np.issubdtype(var.dtype, np.floating)
and "_FillValue" not in var.attrs
and "missing_value" not in var.attrs
):
warnings.warn(
"saving variable %s with floating "
"point data as an integer dtype without "
"any _FillValue to use for NaNs" % name,
SerializationWarning,
stacklevel=10,
)
data = duck_array_ops.around(data)[...]
data = data.astype(dtype=dtype)
var = Variable(dims, data, attrs, encoding)
return var
def maybe_default_fill_value(var):
# make NaN the fill value for float types:
if (
"_FillValue" not in var.attrs
and "_FillValue" not in var.encoding
and np.issubdtype(var.dtype, np.floating)
):
var.attrs["_FillValue"] = var.dtype.type(np.nan)
return var
def maybe_encode_bools(var):
if (
(var.dtype == np.bool)
and ("dtype" not in var.encoding)
and ("dtype" not in var.attrs)
):
dims, data, attrs, encoding = _var_as_tuple(var)
attrs["dtype"] = "bool"
data = data.astype(dtype="i1", copy=True)
var = Variable(dims, data, attrs, encoding)
return var
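# Round-trip sketch for the helper above (plain numpy, made-up values): booleans
# are written as int8 with attrs["dtype"] = "bool" and recovered on read:
#   >>> np.array([True, False, True]).astype("i1")
#   array([1, 0, 1], dtype=int8)
#   >>> np.array([1, 0, 1], dtype="i1").astype(bool)
#   array([ True, False,  True])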
def _infer_dtype(array, name=None):
"""Given an object array with no missing values, infer its dtype from its
first element
"""
if array.dtype.kind != "O":
raise TypeError("infer_type must be called on a dtype=object array")
if array.size == 0:
return np.dtype(float)
element = array[(0,) * array.ndim]
if isinstance(element, (bytes, str)):
return strings.create_vlen_dtype(type(element))
dtype = np.array(element).dtype
if dtype.kind != "O":
return dtype
raise ValueError(
"unable to infer dtype on variable {!r}; xarray "
"cannot serialize arbitrary Python objects".format(name)
)
def ensure_not_multiindex(var, name=None):
if isinstance(var, IndexVariable) and isinstance(var.to_index(), pd.MultiIndex):
raise NotImplementedError(
"variable {!r} is a MultiIndex, which cannot yet be "
"serialized to netCDF files "
"(https://github.com/pydata/xarray/issues/1077). Use "
"reset_index() to convert MultiIndex levels into coordinate "
"variables instead.".format(name)
)
def _copy_with_dtype(data, dtype):
"""Create a copy of an array with the given dtype.
We use this instead of np.array() to ensure that custom object dtypes end
up on the resulting array.
"""
result = np.empty(data.shape, dtype)
result[...] = data
return result
def ensure_dtype_not_object(var, name=None):
# TODO: move this from conventions to backends? (it's not CF related)
if var.dtype.kind == "O":
dims, data, attrs, encoding = _var_as_tuple(var)
if isinstance(data, dask_array_type):
warnings.warn(
"variable {} has data in the form of a dask array with "
"dtype=object, which means it is being loaded into memory "
"to determine a data type that can be safely stored on disk. "
"To avoid this, coerce this variable to a fixed-size dtype "
"with astype() before saving it.".format(name),
SerializationWarning,
)
data = data.compute()
missing = pd.isnull(data)
if missing.any():
# nb. this will fail for dask.array data
non_missing_values = data[~missing]
inferred_dtype = _infer_dtype(non_missing_values, name)
# There is no safe bit-pattern for NA in typical binary string
# formats, so we can't set a fill_value. Unfortunately, this means
# we can't distinguish between missing values and empty strings.
if strings.is_bytes_dtype(inferred_dtype):
fill_value = b""
elif strings.is_unicode_dtype(inferred_dtype):
fill_value = ""
else:
# insist on using float for numeric values
if not np.issubdtype(inferred_dtype, np.floating):
inferred_dtype = np.dtype(float)
fill_value = inferred_dtype.type(np.nan)
data = _copy_with_dtype(data, dtype=inferred_dtype)
data[missing] = fill_value
else:
data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))
assert data.dtype.kind != "O" or data.dtype.metadata
var = Variable(dims, data, attrs, encoding)
return var
def encode_cf_variable(var, needs_copy=True, name=None):
"""
Converts a Variable into a Variable which follows some
of the CF conventions:
- NaNs are masked using _FillValue (or the deprecated missing_value)
- Rescaling via scale_factor and add_offset
- datetimes are converted to the CF 'units since time' format
- dtype encodings are enforced.
Parameters
----------
var : xarray.Variable
A variable holding un-encoded data.
Returns
-------
out : xarray.Variable
A variable which has been encoded as described above.
"""
ensure_not_multiindex(var, name=name)
for coder in [
times.CFDatetimeCoder(),
times.CFTimedeltaCoder(),
variables.CFScaleOffsetCoder(),
variables.CFMaskCoder(),
variables.UnsignedIntegerCoder(),
]:
var = coder.encode(var, name=name)
# TODO(shoyer): convert all of these to use coders, too:
var = maybe_encode_nonstring_dtype(var, name=name)
var = maybe_default_fill_value(var)
var = maybe_encode_bools(var)
var = ensure_dtype_not_object(var, name=name)
return var
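# Hedged usage sketch (the variable below is a made-up example): a float variable
# containing NaN picks up the default _FillValue on its way through the chain above.
#   >>> v = Variable(("x",), np.array([0.0, np.nan, 2.0]))
#   >>> "_FillValue" in encode_cf_variable(v, name="demo").attrs
#   True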
def decode_cf_variable(
name,
var,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
decode_endianness=True,
stack_char_dim=True,
use_cftime=None,
):
"""
Decodes a variable which may hold CF encoded information.
This includes variables that have been masked and scaled, which
hold CF style time variables (this is almost always the case if
the dataset has been serialized) and which have strings encoded
as character arrays.
Parameters
----------
name: str
Name of the variable. Used for better error messages.
var : Variable
A variable holding potentially CF encoded information.
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue). If the _Unsigned attribute is present
treat integer arrays as unsigned.
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
decode_endianness : bool
Decode arrays from non-native to native endianness.
stack_char_dim : bool
Whether to stack characters into bytes along the last dimension of this
array. Passed as an argument because we need to look at the full
dataset to figure out if this is appropriate.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
out : Variable
A variable holding the decoded equivalent of var.
"""
var = as_variable(var)
original_dtype = var.dtype
if concat_characters:
if stack_char_dim:
var = strings.CharacterArrayCoder().decode(var, name=name)
var = strings.EncodedStringCoder().decode(var)
if mask_and_scale:
for coder in [
variables.UnsignedIntegerCoder(),
variables.CFMaskCoder(),
variables.CFScaleOffsetCoder(),
]:
var = coder.decode(var, name=name)
if decode_times:
for coder in [
times.CFTimedeltaCoder(),
times.CFDatetimeCoder(use_cftime=use_cftime),
]:
var = coder.decode(var, name=name)
dimensions, data, attributes, encoding = variables.unpack_for_decoding(var)
# TODO(shoyer): convert everything below to use coders
if decode_endianness and not data.dtype.isnative:
# do this last, so it's only done if we didn't already unmask/scale
data = NativeEndiannessArray(data)
original_dtype = data.dtype
encoding.setdefault("dtype", original_dtype)
if "dtype" in attributes and attributes["dtype"] == "bool":
del attributes["dtype"]
data = BoolTypeArray(data)
if not isinstance(data, dask_array_type):
data = indexing.LazilyOuterIndexedArray(data)
return Variable(dimensions, data, attributes, encoding=encoding)
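# Hedged decoding sketch (hypothetical packed input): integer data with
# scale_factor/add_offset attributes is unpacked lazily by the coders above.
#   >>> raw = Variable(("t",), np.array([0, 1, 2], dtype="i2"),
#   ...                {"scale_factor": 0.5, "add_offset": 10.0})
#   >>> decode_cf_variable("demo", raw).values
#   array([10. , 10.5, 11. ])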
def _update_bounds_attributes(variables):
"""Adds time attributes to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the attributes from the time variable to the
associated boundaries.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
has_date_units = "units" in attrs and "since" in attrs["units"]
if has_date_units and "bounds" in attrs:
if attrs["bounds"] in variables:
bounds_attrs = variables[attrs["bounds"]].attrs
bounds_attrs.setdefault("units", attrs["units"])
if "calendar" in attrs:
bounds_attrs.setdefault("calendar", attrs["calendar"])
def _update_bounds_encoding(variables):
"""Adds time encoding to time bounds variables.
Variables handling time bounds ("Cell boundaries" in the CF
conventions) do not necessarily carry the necessary attributes to be
decoded. This copies the encoding from the time variable to the
associated bounds variable so that we write CF-compliant files.
See Also:
http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/
cf-conventions.html#cell-boundaries
https://github.com/pydata/xarray/issues/2565
"""
# For all time variables with bounds
for v in variables.values():
attrs = v.attrs
encoding = v.encoding
has_date_units = "units" in encoding and "since" in encoding["units"]
is_datetime_type = np.issubdtype(
v.dtype, np.datetime64
) or contains_cftime_datetimes(v)
if (
is_datetime_type
and not has_date_units
and "bounds" in attrs
and attrs["bounds"] in variables
):
warnings.warn(
"Variable '{0}' has datetime type and a "
"bounds variable but {0}.encoding does not have "
"units specified. The units encodings for '{0}' "
"and '{1}' will be determined independently "
"and may not be equal, counter to CF-conventions. "
"If this is a concern, specify a units encoding for "
"'{0}' before writing to a file.".format(v.name, attrs["bounds"]),
UserWarning,
)
if has_date_units and "bounds" in attrs:
if attrs["bounds"] in variables:
bounds_encoding = variables[attrs["bounds"]].encoding
bounds_encoding.setdefault("units", encoding["units"])
if "calendar" in encoding:
bounds_encoding.setdefault("calendar", encoding["calendar"])
def decode_cf_variables(
variables,
attributes,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
decode_coords=True,
drop_variables=None,
use_cftime=None,
):
"""
Decode several CF encoded variables.
See: decode_cf_variable
"""
dimensions_used_by = defaultdict(list)
for v in variables.values():
for d in v.dims:
dimensions_used_by[d].append(v)
def stackable(dim):
# figure out if a dimension can be concatenated over
if dim in variables:
return False
for v in dimensions_used_by[dim]:
if v.dtype.kind != "S" or dim != v.dims[-1]:
return False
return True
coord_names = set()
if isinstance(drop_variables, str):
drop_variables = [drop_variables]
elif drop_variables is None:
drop_variables = []
drop_variables = set(drop_variables)
# Time bounds coordinates might miss the decoding attributes
if decode_times:
_update_bounds_attributes(variables)
new_vars = {}
for k, v in variables.items():
if k in drop_variables:
continue
stack_char_dim = (
concat_characters
and v.dtype == "S1"
and v.ndim > 0
and stackable(v.dims[-1])
)
new_vars[k] = decode_cf_variable(
k,
v,
concat_characters=concat_characters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
stack_char_dim=stack_char_dim,
use_cftime=use_cftime,
)
if decode_coords:
var_attrs = new_vars[k].attrs
if "coordinates" in var_attrs:
coord_str = var_attrs["coordinates"]
var_coord_names = coord_str.split()
if all(k in variables for k in var_coord_names):
new_vars[k].encoding["coordinates"] = coord_str
del var_attrs["coordinates"]
coord_names.update(var_coord_names)
if decode_coords and "coordinates" in attributes:
attributes = dict(attributes)
coord_names.update(attributes.pop("coordinates").split())
return new_vars, attributes, coord_names
def decode_cf(
obj,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
decode_coords=True,
drop_variables=None,
use_cftime=None,
):
"""Decode the given Dataset or Datastore according to CF conventions into
a new Dataset.
Parameters
----------
obj : Dataset or DataStore
Object to decode.
concat_characters : bool, optional
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool, optional
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool, optional
Decode cf times (e.g., integers since 'hours since 2000-01-01') to
np.datetime64.
decode_coords : bool, optional
Use the 'coordinates' attribute on variable (or the dataset itself) to
identify coordinates.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
decoded : Dataset
"""
from .core.dataset import Dataset
from .backends.common import AbstractDataStore
if isinstance(obj, Dataset):
vars = obj._variables
attrs = obj.attrs
extra_coords = set(obj.coords)
file_obj = obj._file_obj
encoding = obj.encoding
elif isinstance(obj, AbstractDataStore):
vars, attrs = obj.load()
extra_coords = set()
file_obj = obj
encoding = obj.get_encoding()
else:
raise TypeError("can only decode Dataset or DataStore objects")
vars, attrs, coord_names = decode_cf_variables(
vars,
attrs,
concat_characters,
mask_and_scale,
decode_times,
decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
)
ds = Dataset(vars, attrs=attrs)
ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))
ds._file_obj = file_obj
ds.encoding = encoding
return ds
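# Hedged usage sketch (assumes the caller already has a Dataset or DataStore
# `store`; the dropped variable name "crs" is made up):
#   ds = decode_cf(store, decode_times=True, drop_variables=["crs"])
#   # CF-encoded times become np.datetime64[ns] where possible and "crs" is skipped.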
def cf_decoder(
variables,
attributes,
concat_characters=True,
mask_and_scale=True,
decode_times=True,
):
"""
Decode a set of CF encoded variables and attributes.
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
concat_characters : bool
Should character arrays be concatenated to strings, for
example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'
mask_and_scale: bool
Lazily scale (using scale_factor and add_offset) and mask
(using _FillValue).
decode_times : bool
Decode cf times ('hours since 2000-01-01') to np.datetime64.
Returns
-------
decoded_variables : dict
A dictionary mapping from variable name to xarray.Variable objects.
decoded_attributes : dict
A dictionary mapping from attribute name to values.
See also
--------
decode_cf_variable
"""
variables, attributes, _ = decode_cf_variables(
variables, attributes, concat_characters, mask_and_scale, decode_times
)
return variables, attributes
def _encode_coordinates(variables, attributes, non_dim_coord_names):
# calculate global and variable specific coordinates
non_dim_coord_names = set(non_dim_coord_names)
for name in list(non_dim_coord_names):
if isinstance(name, str) and " " in name:
warnings.warn(
"coordinate {!r} has a space in its name, which means it "
"cannot be marked as a coordinate on disk and will be "
"saved as a data variable instead".format(name),
SerializationWarning,
stacklevel=6,
)
non_dim_coord_names.discard(name)
global_coordinates = non_dim_coord_names.copy()
variable_coordinates = defaultdict(set)
for coord_name in non_dim_coord_names:
target_dims = variables[coord_name].dims
for k, v in variables.items():
if (
k not in non_dim_coord_names
and k not in v.dims
and set(target_dims) <= set(v.dims)
):
variable_coordinates[k].add(coord_name)
variables = {k: v.copy(deep=False) for k, v in variables.items()}
# keep track of variable names written to file under the "coordinates" attributes
written_coords = set()
for name, var in variables.items():
encoding = var.encoding
attrs = var.attrs
if "coordinates" in attrs and "coordinates" in encoding:
raise ValueError(
f"'coordinates' found in both attrs and encoding for variable {name!r}."
)
# pop_to moves "coordinates" from encoding to attrs, so after the next line
# "coordinates" is never in encoding and attrs["coordinates"] keeps being
# supported for free.
coords_str = pop_to(encoding, attrs, "coordinates")
if not coords_str and variable_coordinates[name]:
attrs["coordinates"] = " ".join(map(str, variable_coordinates[name]))
if "coordinates" in attrs:
written_coords.update(attrs["coordinates"].split())
# These coordinates are not associated with any particular variables, so we
# save them under a global 'coordinates' attribute so xarray can roundtrip
# the dataset faithfully. Because this serialization goes beyond CF
# conventions, only do it if necessary.
# Reference discussion:
# http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/007571.html
global_coordinates.difference_update(written_coords)
if global_coordinates:
attributes = dict(attributes)
if "coordinates" in attributes:
warnings.warn(
f"cannot serialize global coordinates {global_coordinates!r} because the global "
f"attribute 'coordinates' already exists. This may prevent faithful roundtripping"
f"of xarray datasets",
SerializationWarning,
)
else:
attributes["coordinates"] = " ".join(map(str, global_coordinates))
return variables, attributes
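# Outcome sketch for the helper above (hypothetical dataset layout): a data
# variable with dims ("y", "x") and non-dimension coordinates "lat"/"lon" defined
# on those dims gets an attrs["coordinates"] string naming "lat" and "lon", while
# coordinates attached to no data variable are recorded in the global
# 'coordinates' attribute instead.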
def encode_dataset_coordinates(dataset):
"""Encode coordinates on the given dataset object into variable specific
and global attributes.
When possible, this is done according to CF conventions.
Parameters
----------
dataset : Dataset
Object to encode.
Returns
-------
variables : dict
attrs : dict
"""
non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
return _encode_coordinates(
dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names
)
def cf_encoder(variables, attributes):
"""
Encode a set of CF encoded variables and attributes.
Takes a dicts of variables and attributes and encodes them
to conform to CF conventions as much as possible.
This includes masking, scaling, character array handling,
and CF-time encoding.
Parameters
----------
variables : dict
A dictionary mapping from variable name to xarray.Variable
attributes : dict
A dictionary mapping from attribute name to value
Returns
-------
encoded_variables : dict
A dictionary mapping from variable name to xarray.Variable,
encoded_attributes : dict
A dictionary mapping from attribute name to value
See also
--------
decode_cf_variable, encode_cf_variable
"""
# add encoding for time bounds variables if present.
_update_bounds_encoding(variables)
new_vars = {k: encode_cf_variable(v, name=k) for k, v in variables.items()}
# Remove attrs from bounds variables (issue #2921)
for var in new_vars.values():
bounds = var.attrs["bounds"] if "bounds" in var.attrs else None
if bounds and bounds in new_vars:
# see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries
for attr in [
"units",
"standard_name",
"axis",
"positive",
"calendar",
"long_name",
"leap_month",
"leap_year",
"month_lengths",
]:
if attr in new_vars[bounds].attrs and attr in var.attrs:
if new_vars[bounds].attrs[attr] == var.attrs[attr]:
new_vars[bounds].attrs.pop(attr)
return new_vars, attributes
|
apache-2.0
|
BenjaminW3/picongpu
|
src/tools/bin/printField.py
|
11
|
1151
|
#!/usr/bin/env python
#
# Copyright 2013 Richard Pausch
#
# This file is part of PIConGPU.
#
# PIConGPU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PIConGPU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PIConGPU.
# If not, see <http://www.gnu.org/licenses/>.
#
import sys
import numpy as np
# read whitespace-separated tokens as strings, strip the commas they carry,
# convert to floats and regroup every three components into one field vector
data = np.loadtxt(sys.argv[1], dtype=str)
shape = data.shape
data = data.flatten()
for i in range(data.size):
    data[i] = data[i].replace(",", " ")
data = data.astype(float)
data = data.reshape((shape[0], shape[1] // 3, 3))
dataAbs = np.sqrt(data[:, :, 0]**2 + data[:, :, 1]**2 + data[:, :, 2]**2)
import matplotlib.pyplot as plt
plt.imshow(dataAbs, interpolation='nearest')
plt.colorbar()
plt.show()
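# Input format note (an assumption inferred from the parsing above): each row
# holds comma-terminated components separated by whitespace, e.g.
#   1.0, 2.0, 3.0, 4.0, 5.0, 6.0
# so three consecutive tokens form one field vector and the plot shows
# sqrt(Fx**2 + Fy**2 + Fz**2) per cell.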
|
gpl-3.0
|
cbmoore/statsmodels
|
statsmodels/tsa/tests/test_arima.py
|
9
|
89483
|
import warnings
from statsmodels.compat.python import lrange, BytesIO
import numpy as np
from nose.tools import nottest
from numpy.testing import (assert_almost_equal, assert_, assert_allclose,
assert_raises, dec, TestCase)
from statsmodels.tools.testing import assert_equal
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.arma_mle import Arma
from statsmodels.tsa.arima_model import ARMA, ARIMA
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.base.datetools import dates_from_range
from .results import results_arma, results_arima
import os
from statsmodels.tsa.arima_process import arma_generate_sample
from statsmodels.datasets.macrodata import load as load_macrodata
from statsmodels.datasets.macrodata import load_pandas as load_macrodata_pandas
import pandas
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
current_path = os.path.dirname(os.path.abspath(__file__))
y_arma = np.genfromtxt(open(current_path + '/results/y_arma_data.csv', "rb"),
delimiter=",", skip_header=1, dtype=float)
cpi_dates = dates_from_range('1959Q1', '2009Q3')
cpi_predict_dates = dates_from_range('2009Q3', '2015Q4')
sun_dates = dates_from_range('1700', '2008')
sun_predict_dates = dates_from_range('2008', '2033')
from pandas import DatetimeIndex # pylint: disable-msg=E0611
cpi_dates = DatetimeIndex(cpi_dates, freq='infer')
sun_dates = DatetimeIndex(sun_dates, freq='infer')
cpi_predict_dates = DatetimeIndex(cpi_predict_dates, freq='infer')
sun_predict_dates = DatetimeIndex(sun_predict_dates, freq='infer')
def test_compare_arma():
#this is a preliminary test to compare arma_kf, arma_cond_ls and arma_cond_mle
#the results returned by the fit methods are incomplete
#for now without random.seed
np.random.seed(9876565)
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(nsample=200,
burnin=1000)
# this used kalman filter through descriptive
#d = ARMA(x)
#d.fit((1,1), trend='nc')
#dres = d.res
modkf = ARMA(x, (1,1))
##rkf = mkf.fit((1,1))
##rkf.params
reskf = modkf.fit(trend='nc', disp=-1)
dres = reskf
modc = Arma(x)
resls = modc.fit(order=(1,1))
rescm = modc.fit_mle(order=(1,1), start_params=[0.4,0.4, 1.], disp=0)
#decimal 1 corresponds to threshold of 5% difference
#still different sign corrected
#assert_almost_equal(np.abs(resls[0] / d.params), np.ones(d.params.shape), decimal=1)
assert_almost_equal(resls[0] / dres.params, np.ones(dres.params.shape),
decimal=1)
#rescm also contains variance estimate as last element of params
#assert_almost_equal(np.abs(rescm.params[:-1] / d.params), np.ones(d.params.shape), decimal=1)
assert_almost_equal(rescm.params[:-1] / dres.params,
np.ones(dres.params.shape), decimal=1)
#return resls[0], d.params, rescm.params
class CheckArmaResultsMixin(object):
"""
res2 are the results from gretl. They are in results/results_arma.
res1 are from statsmodels
"""
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_aic = DECIMAL_4
def test_aic(self):
assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)
decimal_arroots = DECIMAL_4
def test_arroots(self):
assert_almost_equal(self.res1.arroots, self.res2.arroots,
self.decimal_arroots)
decimal_maroots = DECIMAL_4
def test_maroots(self):
assert_almost_equal(self.res1.maroots, self.res2.maroots,
self.decimal_maroots)
decimal_bse = DECIMAL_2
def test_bse(self):
assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)
decimal_cov_params = DECIMAL_4
def test_covparams(self):
assert_almost_equal(self.res1.cov_params(), self.res2.cov_params,
self.decimal_cov_params)
decimal_hqic = DECIMAL_4
def test_hqic(self):
assert_almost_equal(self.res1.hqic, self.res2.hqic, self.decimal_hqic)
decimal_llf = DECIMAL_4
def test_llf(self):
assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_llf)
decimal_resid = DECIMAL_4
def test_resid(self):
assert_almost_equal(self.res1.resid, self.res2.resid,
self.decimal_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
self.decimal_fittedvalues)
decimal_pvalues = DECIMAL_2
def test_pvalues(self):
assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
self.decimal_pvalues)
decimal_t = DECIMAL_2 # only 2 decimal places in gretl output
def test_tvalues(self):
assert_almost_equal(self.res1.tvalues, self.res2.tvalues,
self.decimal_t)
decimal_sigma2 = DECIMAL_4
def test_sigma2(self):
assert_almost_equal(self.res1.sigma2, self.res2.sigma2,
self.decimal_sigma2)
def test_summary(self):
# smoke tests
table = self.res1.summary()
class CheckForecastMixin(object):
decimal_forecast = DECIMAL_4
def test_forecast(self):
assert_almost_equal(self.res1.forecast_res, self.res2.forecast,
self.decimal_forecast)
decimal_forecasterr = DECIMAL_4
def test_forecasterr(self):
assert_almost_equal(self.res1.forecast_err, self.res2.forecasterr,
self.decimal_forecasterr)
class CheckDynamicForecastMixin(object):
decimal_forecast_dyn = 4
def test_dynamic_forecast(self):
assert_almost_equal(self.res1.forecast_res_dyn, self.res2.forecast_dyn,
self.decimal_forecast_dyn)
#def test_forecasterr(self):
# assert_almost_equal(self.res1.forecast_err_dyn,
# self.res2.forecasterr_dyn,
# DECIMAL_4)
class CheckArimaResultsMixin(CheckArmaResultsMixin):
def test_order(self):
assert self.res1.k_diff == self.res2.k_diff
assert self.res1.k_ar == self.res2.k_ar
assert self.res1.k_ma == self.res2.k_ma
decimal_predict_levels = DECIMAL_4
def test_predict_levels(self):
assert_almost_equal(self.res1.predict(typ='levels'), self.res2.linear,
self.decimal_predict_levels)
class Test_Y_ARMA11_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,0]
cls.res1 = ARMA(endog, order=(1,1)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11()
def test_pickle(self):
fh = BytesIO()
#test wrapped results load save pickle
self.res1.save(fh)
fh.seek(0,0)
res_unpickled = self.res1.__class__.load(fh)
assert_(type(res_unpickled) is type(self.res1))
class Test_Y_ARMA14_NoConst(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,1]
cls.res1 = ARMA(endog, order=(1,4)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma14()
@dec.slow
class Test_Y_ARMA41_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,2]
cls.res1 = ARMA(endog, order=(4,1)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma41()
cls.decimal_maroots = DECIMAL_3
class Test_Y_ARMA22_NoConst(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,3]
cls.res1 = ARMA(endog, order=(2,2)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma22()
class Test_Y_ARMA50_NoConst(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,4]
cls.res1 = ARMA(endog, order=(5,0)).fit(trend='nc', disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma50()
class Test_Y_ARMA02_NoConst(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,5]
cls.res1 = ARMA(endog, order=(0,2)).fit(trend='nc', disp=-1)
cls.res2 = results_arma.Y_arma02()
class Test_Y_ARMA11_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,6]
cls.res1 = ARMA(endog, order=(1,1)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11c()
class Test_Y_ARMA14_Const(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,7]
cls.res1 = ARMA(endog, order=(1,4)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma14c()
class Test_Y_ARMA41_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,8]
cls.res2 = results_arma.Y_arma41c()
cls.res1 = ARMA(endog, order=(4,1)).fit(trend="c", disp=-1,
start_params=cls.res2.params)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.decimal_cov_params = DECIMAL_3
cls.decimal_fittedvalues = DECIMAL_3
cls.decimal_resid = DECIMAL_3
cls.decimal_params = DECIMAL_3
class Test_Y_ARMA22_Const(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,9]
cls.res1 = ARMA(endog, order=(2,2)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma22c()
class Test_Y_ARMA50_Const(CheckArmaResultsMixin, CheckForecastMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,10]
cls.res1 = ARMA(endog, order=(5,0)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma50c()
class Test_Y_ARMA02_Const(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,11]
cls.res1 = ARMA(endog, order=(0,2)).fit(trend="c", disp=-1)
cls.res2 = results_arma.Y_arma02c()
# cov_params and tvalues are off still but not as much vs. R
class Test_Y_ARMA11_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,0]
cls.res1 = ARMA(endog, order=(1,1)).fit(method="css", trend='nc',
disp=-1)
cls.res2 = results_arma.Y_arma11("css")
cls.decimal_t = DECIMAL_1
# better vs. R
class Test_Y_ARMA14_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,1]
cls.res1 = ARMA(endog, order=(1,4)).fit(method="css", trend='nc',
disp=-1)
cls.res2 = results_arma.Y_arma14("css")
cls.decimal_fittedvalues = DECIMAL_3
cls.decimal_resid = DECIMAL_3
cls.decimal_t = DECIMAL_1
# bse, etc. better vs. R
# maroot is off because maparams is off a bit (adjust tolerance?)
class Test_Y_ARMA41_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,2]
cls.res1 = ARMA(endog, order=(4,1)).fit(method="css", trend='nc',
disp=-1)
cls.res2 = results_arma.Y_arma41("css")
cls.decimal_t = DECIMAL_1
cls.decimal_pvalues = 0
cls.decimal_cov_params = DECIMAL_3
cls.decimal_maroots = DECIMAL_1
#same notes as above
class Test_Y_ARMA22_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,3]
cls.res1 = ARMA(endog, order=(2,2)).fit(method="css", trend='nc',
disp=-1)
cls.res2 = results_arma.Y_arma22("css")
cls.decimal_t = DECIMAL_1
cls.decimal_resid = DECIMAL_3
cls.decimal_pvalues = DECIMAL_1
cls.decimal_fittedvalues = DECIMAL_3
#NOTE: gretl just uses least squares for AR CSS
# so BIC, etc. is
# -2*res1.llf + np.log(nobs)*(res1.q+res1.p+res1.k)
# with no adjustment for p and no extra sigma estimate
#NOTE: so our tests use x-12 arima results which agree with us and are
# consistent with the rest of the models
class Test_Y_ARMA50_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,4]
cls.res1 = ARMA(endog, order=(5,0)).fit(method="css", trend='nc',
disp=-1)
cls.res2 = results_arma.Y_arma50("css")
cls.decimal_t = 0
cls.decimal_llf = DECIMAL_1 # looks like rounding error?
class Test_Y_ARMA02_NoConst_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,5]
cls.res1 = ARMA(endog, order=(0,2)).fit(method="css", trend='nc',
disp=-1)
cls.res2 = results_arma.Y_arma02("css")
#NOTE: our results are close to --x-12-arima option and R
class Test_Y_ARMA11_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,6]
cls.res1 = ARMA(endog, order=(1,1)).fit(trend="c", method="css",
disp=-1)
cls.res2 = results_arma.Y_arma11c("css")
cls.decimal_params = DECIMAL_3
cls.decimal_cov_params = DECIMAL_3
cls.decimal_t = DECIMAL_1
class Test_Y_ARMA14_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,7]
cls.res1 = ARMA(endog, order=(1,4)).fit(trend="c", method="css",
disp=-1)
cls.res2 = results_arma.Y_arma14c("css")
cls.decimal_t = DECIMAL_1
cls.decimal_pvalues = DECIMAL_1
class Test_Y_ARMA41_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,8]
cls.res1 = ARMA(endog, order=(4,1)).fit(trend="c", method="css",
disp=-1)
cls.res2 = results_arma.Y_arma41c("css")
cls.decimal_t = DECIMAL_1
cls.decimal_cov_params = DECIMAL_1
cls.decimal_maroots = DECIMAL_3
cls.decimal_bse = DECIMAL_1
class Test_Y_ARMA22_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,9]
cls.res1 = ARMA(endog, order=(2,2)).fit(trend="c", method="css",
disp=-1)
cls.res2 = results_arma.Y_arma22c("css")
cls.decimal_t = 0
cls.decimal_pvalues = DECIMAL_1
class Test_Y_ARMA50_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,10]
cls.res1 = ARMA(endog, order=(5,0)).fit(trend="c", method="css",
disp=-1)
cls.res2 = results_arma.Y_arma50c("css")
cls.decimal_t = DECIMAL_1
cls.decimal_params = DECIMAL_3
cls.decimal_cov_params = DECIMAL_2
class Test_Y_ARMA02_Const_CSS(CheckArmaResultsMixin):
@classmethod
def setupClass(cls):
endog = y_arma[:,11]
cls.res1 = ARMA(endog, order=(0,2)).fit(trend="c", method="css",
disp=-1)
cls.res2 = results_arma.Y_arma02c("css")
def test_reset_trend():
endog = y_arma[:,0]
mod = ARMA(endog, order=(1,1))
res1 = mod.fit(trend="c", disp=-1)
res2 = mod.fit(trend="nc", disp=-1)
assert_equal(len(res1.params), len(res2.params)+1)
@dec.slow
def test_start_params_bug():
data = np.array([1368., 1187, 1090, 1439, 2362, 2783, 2869, 2512, 1804,
1544, 1028, 869, 1737, 2055, 1947, 1618, 1196, 867, 997, 1862, 2525,
3250, 4023, 4018, 3585, 3004, 2500, 2441, 2749, 2466, 2157, 1847, 1463,
1146, 851, 993, 1448, 1719, 1709, 1455, 1950, 1763, 2075, 2343, 3570,
4690, 3700, 2339, 1679, 1466, 998, 853, 835, 922, 851, 1125, 1299, 1105,
860, 701, 689, 774, 582, 419, 846, 1132, 902, 1058, 1341, 1551, 1167,
975, 786, 759, 751, 649, 876, 720, 498, 553, 459, 543, 447, 415, 377,
373, 324, 320, 306, 259, 220, 342, 558, 825, 994, 1267, 1473, 1601,
1896, 1890, 2012, 2198, 2393, 2825, 3411, 3406, 2464, 2891, 3685, 3638,
3746, 3373, 3190, 2681, 2846, 4129, 5054, 5002, 4801, 4934, 4903, 4713,
4745, 4736, 4622, 4642, 4478, 4510, 4758, 4457, 4356, 4170, 4658, 4546,
4402, 4183, 3574, 2586, 3326, 3948, 3983, 3997, 4422, 4496, 4276, 3467,
2753, 2582, 2921, 2768, 2789, 2824, 2482, 2773, 3005, 3641, 3699, 3774,
3698, 3628, 3180, 3306, 2841, 2014, 1910, 2560, 2980, 3012, 3210, 3457,
3158, 3344, 3609, 3327, 2913, 2264, 2326, 2596, 2225, 1767, 1190, 792,
669, 589, 496, 354, 246, 250, 323, 495, 924, 1536, 2081, 2660, 2814, 2992,
3115, 2962, 2272, 2151, 1889, 1481, 955, 631, 288, 103, 60, 82, 107, 185,
618, 1526, 2046, 2348, 2584, 2600, 2515, 2345, 2351, 2355, 2409, 2449,
2645, 2918, 3187, 2888, 2610, 2740, 2526, 2383, 2936, 2968, 2635, 2617,
2790, 3906, 4018, 4797, 4919, 4942, 4656, 4444, 3898, 3908, 3678, 3605,
3186, 2139, 2002, 1559, 1235, 1183, 1096, 673, 389, 223, 352, 308, 365,
525, 779, 894, 901, 1025, 1047, 981, 902, 759, 569, 519, 408, 263, 156,
72, 49, 31, 41, 192, 423, 492, 552, 564, 723, 921, 1525, 2768, 3531, 3824,
3835, 4294, 4533, 4173, 4221, 4064, 4641, 4685, 4026, 4323, 4585, 4836,
4822, 4631, 4614, 4326, 4790, 4736, 4104, 5099, 5154, 5121, 5384, 5274,
5225, 4899, 5382, 5295, 5349, 4977, 4597, 4069, 3733, 3439, 3052, 2626,
1939, 1064, 713, 916, 832, 658, 817, 921, 772, 764, 824, 967, 1127, 1153,
824, 912, 957, 990, 1218, 1684, 2030, 2119, 2233, 2657, 2652, 2682, 2498,
2429, 2346, 2298, 2129, 1829, 1816, 1225, 1010, 748, 627, 469, 576, 532,
475, 582, 641, 605, 699, 680, 714, 670, 666, 636, 672, 679, 446, 248, 134,
160, 178, 286, 413, 676, 1025, 1159, 952, 1398, 1833, 2045, 2072, 1798,
1799, 1358, 727, 353, 347, 844, 1377, 1829, 2118, 2272, 2745, 4263, 4314,
4530, 4354, 4645, 4547, 5391, 4855, 4739, 4520, 4573, 4305, 4196, 3773,
3368, 2596, 2596, 2305, 2756, 3747, 4078, 3415, 2369, 2210, 2316, 2263,
2672, 3571, 4131, 4167, 4077, 3924, 3738, 3712, 3510, 3182, 3179, 2951,
2453, 2078, 1999, 2486, 2581, 1891, 1997, 1366, 1294, 1536, 2794, 3211,
3242, 3406, 3121, 2425, 2016, 1787, 1508, 1304, 1060, 1342, 1589, 2361,
3452, 2659, 2857, 3255, 3322, 2852, 2964, 3132, 3033, 2931, 2636, 2818,
3310, 3396, 3179, 3232, 3543, 3759, 3503, 3758, 3658, 3425, 3053, 2620,
1837, 923, 712, 1054, 1376, 1556, 1498, 1523, 1088, 728, 890, 1413, 2524,
3295, 4097, 3993, 4116, 3874, 4074, 4142, 3975, 3908, 3907, 3918, 3755,
3648, 3778, 4293, 4385, 4360, 4352, 4528, 4365, 3846, 4098, 3860, 3230,
2820, 2916, 3201, 3721, 3397, 3055, 2141, 1623, 1825, 1716, 2232, 2939,
3735, 4838, 4560, 4307, 4975, 5173, 4859, 5268, 4992, 5100, 5070, 5270,
4760, 5135, 5059, 4682, 4492, 4933, 4737, 4611, 4634, 4789, 4811, 4379,
4689, 4284, 4191, 3313, 2770, 2543, 3105, 2967, 2420, 1996, 2247, 2564,
2726, 3021, 3427, 3509, 3759, 3324, 2988, 2849, 2340, 2443, 2364, 1252,
623, 742, 867, 684, 488, 348, 241, 187, 279, 355, 423, 678, 1375, 1497,
1434, 2116, 2411, 1929, 1628, 1635, 1609, 1757, 2090, 2085, 1790, 1846,
2038, 2360, 2342, 2401, 2920, 3030, 3132, 4385, 5483, 5865, 5595, 5485,
5727, 5553, 5560, 5233, 5478, 5159, 5155, 5312, 5079, 4510, 4628, 4535,
3656, 3698, 3443, 3146, 2562, 2304, 2181, 2293, 1950, 1930, 2197, 2796,
3441, 3649, 3815, 2850, 4005, 5305, 5550, 5641, 4717, 5131, 2831, 3518,
3354, 3115, 3515, 3552, 3244, 3658, 4407, 4935, 4299, 3166, 3335, 2728,
2488, 2573, 2002, 1717, 1645, 1977, 2049, 2125, 2376, 2551, 2578, 2629,
2750, 3150, 3699, 4062, 3959, 3264, 2671, 2205, 2128, 2133, 2095, 1964,
2006, 2074, 2201, 2506, 2449, 2465, 2064, 1446, 1382, 983, 898, 489, 319,
383, 332, 276, 224, 144, 101, 232, 429, 597, 750, 908, 960, 1076, 951,
1062, 1183, 1404, 1391, 1419, 1497, 1267, 963, 682, 777, 906, 1149, 1439,
1600, 1876, 1885, 1962, 2280, 2711, 2591, 2411])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = ARMA(data, order=(4,1)).fit(disp=-1)
class Test_ARIMA101(CheckArmaResultsMixin):
# just make sure this works
@classmethod
def setupClass(cls):
endog = y_arma[:,6]
cls.res1 = ARIMA(endog, (1,0,1)).fit(trend="c", disp=-1)
(cls.res1.forecast_res, cls.res1.forecast_err,
confint) = cls.res1.forecast(10)
cls.res2 = results_arma.Y_arma11c()
cls.res2.k_diff = 0
cls.res2.k_ar = 1
cls.res2.k_ma = 1
class Test_ARIMA111(CheckArimaResultsMixin, CheckForecastMixin,
CheckDynamicForecastMixin):
@classmethod
def setupClass(cls):
cpi = load_macrodata().data['cpi']
cls.res1 = ARIMA(cpi, (1,1,1)).fit(disp=-1)
cls.res2 = results_arima.ARIMA111()
# make sure endog names changes to D.cpi
cls.decimal_llf = 3
cls.decimal_aic = 3
cls.decimal_bic = 3
cls.decimal_cov_params = 2 # this used to be better?
cls.decimal_t = 0
(cls.res1.forecast_res,
cls.res1.forecast_err,
conf_int) = cls.res1.forecast(25)
#cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226, typ='levels', dynamic=True)
#TODO: fix the indexing for the end here, I don't think this is right
# if we're going to treat it like indexing
# the forecast from 2005Q1 through 2009Q4 is indices
# 184 through 227 not 226
# note that the first one counts in the count so 164 + 64 is 65
# predictions
cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164+63,
typ='levels', dynamic=True)
def test_freq(self):
assert_almost_equal(self.res1.arfreq, [0.0000], 4)
assert_almost_equal(self.res1.mafreq, [0.0000], 4)
class Test_ARIMA111CSS(CheckArimaResultsMixin, CheckForecastMixin,
CheckDynamicForecastMixin):
@classmethod
def setupClass(cls):
cpi = load_macrodata().data['cpi']
cls.res1 = ARIMA(cpi, (1,1,1)).fit(disp=-1, method='css')
cls.res2 = results_arima.ARIMA111(method='css')
cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
# make sure endog names changes to D.cpi
(cls.res1.forecast_res,
cls.res1.forecast_err,
conf_int) = cls.res1.forecast(25)
cls.decimal_forecast = 2
cls.decimal_forecast_dyn = 2
cls.decimal_forecasterr = 3
cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=164+63,
typ='levels', dynamic=True)
# precisions
cls.decimal_arroots = 3
cls.decimal_cov_params = 3
cls.decimal_hqic = 3
cls.decimal_maroots = 3
cls.decimal_t = 1
cls.decimal_fittedvalues = 2 # because of rounding when copying
cls.decimal_resid = 2
#cls.decimal_llf = 3
#cls.decimal_aic = 3
#cls.decimal_bic = 3
cls.decimal_predict_levels = DECIMAL_2
class Test_ARIMA112CSS(CheckArimaResultsMixin):
@classmethod
def setupClass(cls):
cpi = load_macrodata().data['cpi']
cls.res1 = ARIMA(cpi, (1,1,2)).fit(disp=-1, method='css',
start_params = [.905322, -.692425, 1.07366,
0.172024])
cls.res2 = results_arima.ARIMA112(method='css')
cls.res2.fittedvalues = - cpi[1:-1] + cls.res2.linear
# make sure endog names changes to D.cpi
cls.decimal_llf = 3
cls.decimal_aic = 3
cls.decimal_bic = 3
#(cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
#cls.res1.forecast_res_dyn = cls.res1.predict(start=164, end=226, typ='levels', dynamic=True)
#TODO: fix the indexing for the end here, I don't think this is right
# if we're going to treat it like indexing
# the forecast from 2005Q1 through 2009Q4 is indices
# 184 through 227 not 226
# note that the first one counts in the count so 164 + 64 is 65
# predictions
#cls.res1.forecast_res_dyn = self.predict(start=164, end=164+63,
# typ='levels', dynamic=True)
# since we got from gretl don't have linear prediction in differences
cls.decimal_arroots = 3
cls.decimal_maroots = 2
cls.decimal_t = 1
cls.decimal_resid = 2
cls.decimal_fittedvalues = 3
cls.decimal_predict_levels = DECIMAL_3
def test_freq(self):
assert_almost_equal(self.res1.arfreq, [0.5000], 4)
assert_almost_equal(self.res1.mafreq, [0.5000, 0.5000], 4)
#class Test_ARIMADates(CheckArmaResults, CheckForecast, CheckDynamicForecast):
# @classmethod
# def setupClass(cls):
# from statsmodels.tsa.datetools import dates_from_range
#
# cpi = load_macrodata().data['cpi']
# dates = dates_from_range('1959q1', length=203)
# cls.res1 = ARIMA(cpi, dates=dates, freq='Q').fit(order=(1,1,1), disp=-1)
# cls.res2 = results_arima.ARIMA111()
# # make sure endog names changes to D.cpi
# cls.decimal_llf = 3
# cls.decimal_aic = 3
# cls.decimal_bic = 3
# (cls.res1.forecast_res,
# cls.res1.forecast_err,
# conf_int) = cls.res1.forecast(25)
def test_arima_predict_mle_dates():
cpi = load_macrodata().data['cpi']
res1 = ARIMA(cpi, (4,1,1), dates=cpi_dates, freq='Q').fit(disp=-1)
arima_forecasts = np.genfromtxt(open(
current_path + '/results/results_arima_forecasts_all_mle.csv', "rb"),
delimiter=",", skip_header=1, dtype=float)
fc = arima_forecasts[:,0]
fcdyn = arima_forecasts[:,1]
fcdyn2 = arima_forecasts[:,2]
start, end = 2, 51
fv = res1.predict('1959Q3', '1971Q4', typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
assert_equal(res1.data.predict_dates, cpi_dates[start:end+1])
start, end = 202, 227
fv = res1.predict('2009Q3', '2015Q4', typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
assert_equal(res1.data.predict_dates, cpi_predict_dates)
# make sure dynamic works
start, end = '1960q2', '1971q4'
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[5:51+1], DECIMAL_4)
start, end = '1965q1', '2015q4'
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[24:227+1], DECIMAL_4)
def test_arma_predict_mle_dates():
from statsmodels.datasets.sunspots import load
sunspots = load().data['SUNACTIVITY']
mod = ARMA(sunspots, (9,0), dates=sun_dates, freq='A')
mod.method = 'mle'
assert_raises(ValueError, mod._get_predict_start, *('1701', True))
start, end = 2, 51
_ = mod._get_predict_start('1702', False)
_ = mod._get_predict_end('1751')
assert_equal(mod.data.predict_dates, sun_dates[start:end+1])
start, end = 308, 333
_ = mod._get_predict_start('2008', False)
_ = mod._get_predict_end('2033')
assert_equal(mod.data.predict_dates, sun_predict_dates)
def test_arima_predict_css_dates():
cpi = load_macrodata().data['cpi']
res1 = ARIMA(cpi, (4,1,1), dates=cpi_dates, freq='Q').fit(disp=-1,
method='css', trend='nc')
params = np.array([ 1.231272508473910,
-0.282516097759915,
0.170052755782440,
-0.118203728504945,
-0.938783134717947])
arima_forecasts = np.genfromtxt(open(
current_path + '/results/results_arima_forecasts_all_css.csv', "rb"),
delimiter=",", skip_header=1, dtype=float)
fc = arima_forecasts[:,0]
fcdyn = arima_forecasts[:,1]
fcdyn2 = arima_forecasts[:,2]
start, end = 5, 51
fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
assert_equal(res1.data.predict_dates, cpi_dates[start:end+1])
start, end = 202, 227
fv = res1.model.predict(params, '2009Q3', '2015Q4', typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
assert_equal(res1.data.predict_dates, cpi_predict_dates)
# make sure dynamic works
start, end = 5, 51
fv = res1.model.predict(params, '1960Q2', '1971Q4', typ='levels',
dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
start, end = '1965q1', '2015q4'
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[24:227+1], DECIMAL_4)
def test_arma_predict_css_dates():
from statsmodels.datasets.sunspots import load
sunspots = load().data['SUNACTIVITY']
mod = ARMA(sunspots, (9,0), dates=sun_dates, freq='A')
mod.method = 'css'
assert_raises(ValueError, mod._get_predict_start, *('1701', False))
def test_arima_predict_mle():
cpi = load_macrodata().data['cpi']
res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1)
# fit the model so that we get correct endog length but use
arima_forecasts = np.genfromtxt(open(
current_path + '/results/results_arima_forecasts_all_mle.csv', "rb"),
delimiter=",", skip_header=1, dtype=float)
fc = arima_forecasts[:,0]
fcdyn = arima_forecasts[:,1]
fcdyn2 = arima_forecasts[:,2]
fcdyn3 = arima_forecasts[:,3]
fcdyn4 = arima_forecasts[:,4]
# 0 indicates the first sample-observation below
# ie., the index after the pre-sample, these are also differenced once
# so the indices are moved back once from the cpi in levels
# start < p, end <p 1959q2 - 1959q4
start, end = 1,3
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start < p, end 0 1959q3 - 1960q1
start, end = 2, 4
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start < p, end >0 1959q3 - 1971q4
start, end = 2, 51
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start < p, end nobs 1959q3 - 2009q3
start, end = 2, 202
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start < p, end >nobs 1959q3 - 2015q4
start, end = 2, 227
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end >0 1960q1 - 1971q4
start, end = 4, 51
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 4, 202
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
start, end = 4, 227
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
#NOTE: raises
#start, end = 202, 202
#fv = res1.predict(start, end, typ='levels')
#assert_almost_equal(fv, [])
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_3)
# start >nobs, end >nobs 2009q4 - 2015q4
#NOTE: this raises but shouldn't, dynamic forecasts could start
#one period out
start, end = 203, 227
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.predict(start, end, typ='levels')
assert_almost_equal(fv, fc[1:203], DECIMAL_4)
#### Dynamic #####
# start < p, end <p 1959q2 - 1959q4
#NOTE: should raise
#start, end = 1,3
#fv = res1.predict(start, end, dynamic=True, typ='levels')
#assert_almost_equal(fv, arima_forecasts[:,15])
# start < p, end 0 1959q3 - 1960q1
#NOTE: below should raise an error
#start, end = 2, 4
#fv = res1.predict(start, end, dynamic=True, typ='levels')
#assert_almost_equal(fv, fcdyn[5:end+1], DECIMAL_4)
# start < p, end >0 1959q3 - 1971q4
#start, end = 2, 51
#fv = res1.predict(start, end, dynamic=True, typ='levels')
#assert_almost_equal(fv, fcdyn[5:end+1], DECIMAL_4)
## start < p, end nobs 1959q3 - 2009q3
#start, end = 2, 202
#fv = res1.predict(start, end, dynamic=True, typ='levels')
#assert_almost_equal(fv, fcdyn[5:end+1], DECIMAL_4)
## start < p, end >nobs 1959q3 - 2015q4
#start, end = 2, 227
#fv = res1.predict(start, end, dynamic=True, typ='levels')
#assert_almost_equal(fv, fcdyn[5:end+1], DECIMAL_4)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
start, end = 5, 227
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.predict(start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def _check_start(model, given, expected, dynamic):
start = model._get_predict_start(given, dynamic)
assert_equal(start, expected)
def _check_end(model, given, end_expect, out_of_sample_expect):
end, out_of_sample = model._get_predict_end(given)
assert_equal((end, out_of_sample), (end_expect, out_of_sample_expect))
def test_arma_predict_indices():
from statsmodels.datasets.sunspots import load
sunspots = load().data['SUNACTIVITY']
model = ARMA(sunspots, (9,0), dates=sun_dates, freq='A')
model.method = 'mle'
# raises - pre-sample + dynamic
assert_raises(ValueError, model._get_predict_start, *(0, True))
assert_raises(ValueError, model._get_predict_start, *(8, True))
assert_raises(ValueError, model._get_predict_start, *('1700', True))
assert_raises(ValueError, model._get_predict_start, *('1708', True))
# raises - start out of sample
assert_raises(ValueError, model._get_predict_start, *(311, True))
assert_raises(ValueError, model._get_predict_start, *(311, False))
assert_raises(ValueError, model._get_predict_start, *('2010', True))
assert_raises(ValueError, model._get_predict_start, *('2010', False))
# works - in-sample
# None
# given, expected, dynamic
start_test_cases = [
(None, 9, True),
# all start get moved back by k_diff
(9, 9, True),
(10, 10, True),
# what about end of sample start - last value is first
# forecast
(309, 309, True),
(308, 308, True),
(0, 0, False),
(1, 1, False),
(4, 4, False),
# all start get moved back by k_diff
('1709', 9, True),
('1710', 10, True),
# what about end of sample start - last value is first
# forecast
('2008', 308, True),
('2009', 309, True),
('1700', 0, False),
('1708', 8, False),
('1709', 9, False),
]
for case in start_test_cases:
_check_start(*((model,) + case))
# the length of sunspot is 309, so last index is 308
end_test_cases = [(None, 308, 0),
(307, 307, 0),
(308, 308, 0),
(309, 308, 1),
(312, 308, 4),
(51, 51, 0),
(333, 308, 25),
('2007', 307, 0),
('2008', 308, 0),
('2009', 308, 1),
('2012', 308, 4),
('1815', 115, 0),
('2033', 308, 25),
]
for case in end_test_cases:
_check_end(*((model,)+case))
def test_arima_predict_indices():
cpi = load_macrodata().data['cpi']
model = ARIMA(cpi, (4,1,1), dates=cpi_dates, freq='Q')
model.method = 'mle'
# starting indices
# raises - pre-sample + dynamic
assert_raises(ValueError, model._get_predict_start, *(0, True))
assert_raises(ValueError, model._get_predict_start, *(4, True))
assert_raises(ValueError, model._get_predict_start, *('1959Q1', True))
assert_raises(ValueError, model._get_predict_start, *('1960Q1', True))
# raises - index differenced away
assert_raises(ValueError, model._get_predict_start, *(0, False))
assert_raises(ValueError, model._get_predict_start, *('1959Q1', False))
# raises - start out of sample
assert_raises(ValueError, model._get_predict_start, *(204, True))
assert_raises(ValueError, model._get_predict_start, *(204, False))
assert_raises(ValueError, model._get_predict_start, *('2010Q1', True))
assert_raises(ValueError, model._get_predict_start, *('2010Q1', False))
# works - in-sample
# None
# given, expected, dynamic
start_test_cases = [
(None, 4, True),
# all start get moved back by k_diff
(5, 4, True),
(6, 5, True),
# what about end of sample start - last value is first
# forecast
(203, 202, True),
(1, 0, False),
(4, 3, False),
(5, 4, False),
# all start get moved back by k_diff
('1960Q2', 4, True),
('1960Q3', 5, True),
# what about end of sample start - last value is first
# forecast
('2009Q4', 202, True),
('1959Q2', 0, False),
('1960Q1', 3, False),
('1960Q2', 4, False),
]
for case in start_test_cases:
_check_start(*((model,) + case))
# check raises
#TODO: make sure dates are passing through unmolested
#assert_raises(ValueError, model._get_predict_end, ("2001-1-1",))
# the length of diff(cpi) is 202, so last index is 201
end_test_cases = [(None, 201, 0),
(201, 200, 0),
(202, 201, 0),
(203, 201, 1),
(204, 201, 2),
(51, 50, 0),
(164+63, 201, 25),
('2009Q2', 200, 0),
('2009Q3', 201, 0),
('2009Q4', 201, 1),
('2010Q1', 201, 2),
('1971Q4', 50, 0),
('2015Q4', 201, 25),
]
for case in end_test_cases:
_check_end(*((model,)+case))
# check higher k_diff
model.k_diff = 2
# raises - pre-sample + dynamic
assert_raises(ValueError, model._get_predict_start, *(0, True))
assert_raises(ValueError, model._get_predict_start, *(5, True))
assert_raises(ValueError, model._get_predict_start, *('1959Q1', True))
assert_raises(ValueError, model._get_predict_start, *('1960Q1', True))
# raises - index differenced away
assert_raises(ValueError, model._get_predict_start, *(1, False))
assert_raises(ValueError, model._get_predict_start, *('1959Q2', False))
start_test_cases = [(None, 4, True),
# all start get moved back by k_diff
(6, 4, True),
# what about end of sample start - last value is first
# forecast
(203, 201, True),
(2, 0, False),
(4, 2, False),
(5, 3, False),
('1960Q3', 4, True),
# what about end of sample start - last value is first
# forecast
('2009Q4', 201, True),
('2009Q4', 201, True),
('1959Q3', 0, False),
('1960Q1', 2, False),
('1960Q2', 3, False),
]
for case in start_test_cases:
_check_start(*((model,)+case))
end_test_cases = [(None, 200, 0),
(201, 199, 0),
(202, 200, 0),
(203, 200, 1),
(204, 200, 2),
(51, 49, 0),
(164+63, 200, 25),
('2009Q2', 199, 0),
('2009Q3', 200, 0),
('2009Q4', 200, 1),
('2010Q1', 200, 2),
('1971Q4', 49, 0),
('2015Q4', 200, 25),
]
for case in end_test_cases:
_check_end(*((model,)+case))
def test_arima_predict_indices_css():
cpi = load_macrodata().data['cpi']
#NOTE: Doing no-constant for now to kick the conditional exogenous
#issue 274 down the road
# go ahead and fit the model to set up necessary variables
model = ARIMA(cpi, (4,1,1))
model.method = 'css'
assert_raises(ValueError, model._get_predict_start, *(0, False))
assert_raises(ValueError, model._get_predict_start, *(0, True))
assert_raises(ValueError, model._get_predict_start, *(2, False))
assert_raises(ValueError, model._get_predict_start, *(2, True))
def test_arima_predict_css():
cpi = load_macrodata().data['cpi']
#NOTE: Doing no-constant for now to kick the conditional exogenous
#issue 274 down the road
# go ahead and fit the model to set up necessary variables
res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1, method="css",
trend="nc")
# but use gretl parameters to predict to avoid precision problems
params = np.array([ 1.231272508473910,
-0.282516097759915,
0.170052755782440,
-0.118203728504945,
-0.938783134717947])
arima_forecasts = np.genfromtxt(open(
current_path + '/results/results_arima_forecasts_all_css.csv', "rb"),
delimiter=",", skip_header=1, dtype=float)
fc = arima_forecasts[:,0]
fcdyn = arima_forecasts[:,1]
fcdyn2 = arima_forecasts[:,2]
fcdyn3 = arima_forecasts[:,3]
fcdyn4 = arima_forecasts[:,4]
#NOTE: should raise
#start, end = 1,3
#fv = res1.model.predict(params, start, end)
## start < p, end 0 1959q3 - 1960q1
#start, end = 2, 4
#fv = res1.model.predict(params, start, end)
## start < p, end >0 1959q3 - 1971q4
#start, end = 2, 51
#fv = res1.model.predict(params, start, end)
## start < p, end nobs 1959q3 - 2009q3
#start, end = 2, 202
#fv = res1.model.predict(params, start, end)
## start < p, end >nobs 1959q3 - 2015q4
#start, end = 2, 227
#fv = res1.model.predict(params, start, end)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
#TODO: why is the precision deteriorating?
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.model.predict(params, start, end, typ='levels')
assert_almost_equal(fv, fc[5:203], DECIMAL_4)
#### Dynamic #####
#NOTE: should raise
# start < p, end <p 1959q2 - 1959q4
#start, end = 1,3
#fv = res1.predict(start, end, dynamic=True)
# start < p, end 0 1959q3 - 1960q1
#start, end = 2, 4
#fv = res1.predict(start, end, dynamic=True)
## start < p, end >0 1959q3 - 1971q4
#start, end = 2, 51
#fv = res1.predict(start, end, dynamic=True)
## start < p, end nobs 1959q3 - 2009q3
#start, end = 2, 202
#fv = res1.predict(start, end, dynamic=True)
## start < p, end >nobs 1959q3 - 2015q4
#start, end = 2, 227
#fv = res1.predict(start, end, dynamic=True)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
start, end = 5, 227
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.model.predict(params, start, end, dynamic=True, typ='levels')
assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def test_arima_predict_css_diffs():
cpi = load_macrodata().data['cpi']
#NOTE: Doing no-constant for now to kick the conditional exogenous
#issue 274 down the road
# go ahead and fit the model to set up necessary variables
res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1, method="css",
trend="c")
# but use gretl parameters to predict to avoid precision problems
params = np.array([0.78349893861244,
-0.533444105973324,
0.321103691668809,
0.264012463189186,
0.107888256920655,
0.920132542916995])
# we report mean, should we report constant?
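# convert the intercept reported by gretl to the process mean that the
# statsmodels parameterisation uses: mean = const / (1 - sum(AR coefficients))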
params[0] = params[0] / (1 - params[1:5].sum())
arima_forecasts = np.genfromtxt(open(
current_path + '/results/results_arima_forecasts_all_css_diff.csv',
"rb"),
delimiter=",", skip_header=1, dtype=float)
fc = arima_forecasts[:,0]
fcdyn = arima_forecasts[:,1]
fcdyn2 = arima_forecasts[:,2]
fcdyn3 = arima_forecasts[:,3]
fcdyn4 = arima_forecasts[:,4]
#NOTE: should raise
#start, end = 1,3
#fv = res1.model.predict(params, start, end)
## start < p, end 0 1959q3 - 1960q1
#start, end = 2, 4
#fv = res1.model.predict(params, start, end)
## start < p, end >0 1959q3 - 1971q4
#start, end = 2, 51
#fv = res1.model.predict(params, start, end)
## start < p, end nobs 1959q3 - 2009q3
#start, end = 2, 202
#fv = res1.model.predict(params, start, end)
## start < p, end >nobs 1959q3 - 2015q4
#start, end = 2, 227
#fv = res1.model.predict(params, start, end)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
#TODO: why is the precision deteriorating?
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[5:203], DECIMAL_4)
#### Dynamic #####
#NOTE: should raise
# start < p, end <p 1959q2 - 1959q4
#start, end = 1,3
#fv = res1.predict(start, end, dynamic=True)
# start < p, end 0 1959q3 - 1960q1
#start, end = 2, 4
#fv = res1.predict(start, end, dynamic=True)
## start < p, end >0 1959q3 - 1971q4
#start, end = 2, 51
#fv = res1.predict(start, end, dynamic=True)
## start < p, end nobs 1959q3 - 2009q3
#start, end = 2, 202
#fv = res1.predict(start, end, dynamic=True)
## start < p, end >nobs 1959q3 - 2015q4
#start, end = 2, 227
#fv = res1.predict(start, end, dynamic=True)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
start, end = 5, 227
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.model.predict(params, start, end, dynamic=True)
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def test_arima_predict_mle_diffs():
cpi = load_macrodata().data['cpi']
#NOTE: Doing no-constant for now to kick the conditional exogenous
#issue 274 down the road
# go ahead and fit the model to set up necessary variables
res1 = ARIMA(cpi, (4,1,1)).fit(disp=-1, trend="c")
# but use gretl parameters to predict to avoid precision problems
params = np.array([0.926875951549299,
-0.555862621524846,
0.320865492764400,
0.252253019082800,
0.113624958031799,
0.939144026934634])
arima_forecasts = np.genfromtxt(open(
current_path + '/results/results_arima_forecasts_all_mle_diff.csv',
"rb"),
delimiter=",", skip_header=1, dtype=float)
fc = arima_forecasts[:,0]
fcdyn = arima_forecasts[:,1]
fcdyn2 = arima_forecasts[:,2]
fcdyn3 = arima_forecasts[:,3]
fcdyn4 = arima_forecasts[:,4]
#NOTE: should raise
start, end = 1,3
fv = res1.model.predict(params, start, end)
## start < p, end 0 1959q3 - 1960q1
start, end = 2, 4
fv = res1.model.predict(params, start, end)
## start < p, end >0 1959q3 - 1971q4
start, end = 2, 51
fv = res1.model.predict(params, start, end)
## start < p, end nobs 1959q3 - 2009q3
start, end = 2, 202
fv = res1.model.predict(params, start, end)
## start < p, end >nobs 1959q3 - 2015q4
start, end = 2, 227
fv = res1.model.predict(params, start, end)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
#TODO: why is the precision deteriorating?
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.model.predict(params, start, end)
assert_almost_equal(fv, fc[1:203], DECIMAL_4)
#### Dynamic #####
#NOTE: should raise
# start < p, end <p 1959q2 - 1959q4
#start, end = 1,3
#fv = res1.predict(start, end, dynamic=True)
# start < p, end 0 1959q3 - 1960q1
#start, end = 2, 4
#fv = res1.predict(start, end, dynamic=True)
## start < p, end >0 1959q3 - 1971q4
#start, end = 2, 51
#fv = res1.predict(start, end, dynamic=True)
## start < p, end nobs 1959q3 - 2009q3
#start, end = 2, 202
#fv = res1.predict(start, end, dynamic=True)
## start < p, end >nobs 1959q3 - 2015q4
#start, end = 2, 227
#fv = res1.predict(start, end, dynamic=True)
# start 0, end >0 1960q1 - 1971q4
start, end = 5, 51
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end nobs 1960q1 - 2009q3
start, end = 5, 202
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start 0, end >nobs 1960q1 - 2015q4
start, end = 5, 227
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[start:end+1], DECIMAL_4)
# start >p, end >0 1965q1 - 1971q4
start, end = 24, 51
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end nobs 1965q1 - 2009q3
start, end = 24, 202
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start >p, end >nobs 1965q1 - 2015q4
start, end = 24, 227
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn2[start:end+1], DECIMAL_4)
# start nobs, end nobs 2009q3 - 2009q3
start, end = 202, 202
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn3[start:end+1], DECIMAL_4)
# start nobs, end >nobs 2009q3 - 2015q4
start, end = 202, 227
fv = res1.model.predict(params, start, end, dynamic=True)
# start >nobs, end >nobs 2009q4 - 2015q4
start, end = 203, 227
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn4[start:end+1], DECIMAL_4)
# defaults
start, end = None, None
fv = res1.model.predict(params, start, end, dynamic=True)
assert_almost_equal(fv, fcdyn[5:203], DECIMAL_4)
def test_arima_wrapper():
cpi = load_macrodata_pandas().data['cpi']
cpi.index = pandas.Index(cpi_dates)
res = ARIMA(cpi, (4,1,1), freq='Q').fit(disp=-1)
assert_equal(res.params.index, pandas.Index(['const', 'ar.L1.D.cpi', 'ar.L2.D.cpi',
'ar.L3.D.cpi', 'ar.L4.D.cpi',
'ma.L1.D.cpi']))
assert_equal(res.model.endog_names, 'D.cpi')
def test_1dexog():
# smoke test, this will raise an error if broken
dta = load_macrodata_pandas().data
endog = dta['realcons'].values
exog = dta['m1'].values.squeeze()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod = ARMA(endog, (1,1), exog).fit(disp=-1)
mod.predict(193, 203, exog[-10:])
# check dynamic=True with a pandas Series, see #2589
mod.predict(193, 202, exog[-10:], dynamic=True)
dta.index = pandas.Index(cpi_dates)
mod = ARMA(dta['realcons'], (1,1), dta['m1']).fit(disp=-1)
mod.predict(dta.index[-10], dta.index[-1], exog=dta['m1'][-10:], dynamic=True)
mod = ARMA(dta['realcons'], (1,1), dta['m1']).fit(trend='nc', disp=-1)
mod.predict(dta.index[-10], dta.index[-1], exog=dta['m1'][-10:], dynamic=True)
def test_arima_predict_bug():
#predict_start_date wasn't getting set on start = None
from statsmodels.datasets import sunspots
dta = sunspots.load_pandas().data.SUNACTIVITY
dta.index = pandas.Index(dates_from_range('1700', '2008'))
arma_mod20 = ARMA(dta, (2,0)).fit(disp=-1)
arma_mod20.predict(None, None)
# test prediction with time stamp, see #2587
predict = arma_mod20.predict(dta.index[-20], dta.index[-1])
assert_(predict.index.equals(dta.index[-20:]))
predict = arma_mod20.predict(dta.index[-20], dta.index[-1], dynamic=True)
assert_(predict.index.equals(dta.index[-20:]))
# partially out of sample
predict_dates = pandas.Index(dates_from_range('2000', '2015'))
predict = arma_mod20.predict(predict_dates[0], predict_dates[-1])
assert_(predict.index.equals(predict_dates))
#assert_(1 == 0)
def test_arima_predict_q2():
# bug with q > 1 for arima predict
inv = load_macrodata().data['realinv']
arima_mod = ARIMA(np.log(inv), (1,1,2)).fit(start_params=[0,0,0,0], disp=-1)
fc, stderr, conf_int = arima_mod.forecast(5)
# values copy-pasted from gretl
assert_almost_equal(fc,
[7.306320, 7.313825, 7.321749, 7.329827, 7.337962],
5)
def test_arima_predict_pandas_nofreq():
# this is issue 712
from pandas import DataFrame
dates = ["2010-01-04", "2010-01-05", "2010-01-06", "2010-01-07",
"2010-01-08", "2010-01-11", "2010-01-12", "2010-01-11",
"2010-01-12", "2010-01-13", "2010-01-17"]
close = [626.75, 623.99, 608.26, 594.1, 602.02, 601.11, 590.48, 587.09,
589.85, 580.0,587.62]
data = DataFrame(close, index=DatetimeIndex(dates), columns=["close"])
#TODO: fix this names bug for non-string names
arma = ARMA(data, order=(1,0)).fit(disp=-1)
# first check that in-sample prediction works
predict = arma.predict()
assert_(predict.index.equals(data.index))
# check that this raises an exception when date not on index
assert_raises(ValueError, arma.predict, start="2010-1-9", end=10)
assert_raises(ValueError, arma.predict, start="2010-1-9", end="2010-1-17")
# raise because end not on index
assert_raises(ValueError, arma.predict, start="2010-1-4", end="2010-1-10")
# raise because end not on index
assert_raises(ValueError, arma.predict, start=3, end="2010-1-10")
predict = arma.predict(start="2010-1-7", end=10) # should be of length 10
assert_(len(predict) == 8)
assert_(predict.index.equals(data.index[3:10+1]))
predict = arma.predict(start="2010-1-7", end=14)
assert_(predict.index.equals(pandas.Index(lrange(3, 15))))
predict = arma.predict(start=3, end=14)
assert_(predict.index.equals(pandas.Index(lrange(3, 15))))
# end can be a date if it's in the sample and on the index
# predict dates is just a slice of the dates index then
predict = arma.predict(start="2010-1-6", end="2010-1-13")
assert_(predict.index.equals(data.index[2:10]))
predict = arma.predict(start=2, end="2010-1-13")
assert_(predict.index.equals(data.index[2:10]))
def test_arima_predict_exog():
# check 625 and 626
#from statsmodels.tsa.arima_process import arma_generate_sample
#arparams = np.array([1, -.45, .25])
#maparams = np.array([1, .15])
#nobs = 100
#np.random.seed(123)
#y = arma_generate_sample(arparams, maparams, nobs, burnin=100)
## make an exogenous trend
#X = np.array(lrange(nobs)) / 20.0
## add a constant
#y += 2.5
from pandas import read_csv
arima_forecasts = read_csv(current_path + "/results/"
"results_arima_exog_forecasts_mle.csv")
y = arima_forecasts["y"].dropna()
X = np.arange(len(y) + 25)/20.
predict_expected = arima_forecasts["predict"]
arma_res = ARMA(y.values, order=(2,1), exog=X[:100]).fit(trend="c",
disp=-1)
# params from gretl
params = np.array([2.786912485145725, -0.122650190196475,
0.533223846028938, -0.319344321763337,
0.132883233000064])
assert_almost_equal(arma_res.params, params, 5)
# no exog for in-sample
predict = arma_res.predict()
assert_almost_equal(predict, predict_expected.values[:100], 5)
# check 626
assert_(len(arma_res.model.exog_names) == 5)
# exog for out-of-sample and in-sample dynamic
predict = arma_res.model.predict(params, end=124, exog=X[100:])
assert_almost_equal(predict, predict_expected.values, 6)
# conditional sum of squares
#arima_forecasts = read_csv(current_path + "/results/"
# "results_arima_exog_forecasts_css.csv")
#predict_expected = arima_forecasts["predict"].dropna()
#arma_res = ARMA(y.values, order=(2,1), exog=X[:100]).fit(trend="c",
# method="css",
# disp=-1)
#params = np.array([2.152350033809826, -0.103602399018814,
# 0.566716580421188, -0.326208009247944,
# 0.102142932143421])
#predict = arma_res.model.predict(params)
## in-sample
#assert_almost_equal(predict, predict_expected.values[:98], 6)
#predict = arma_res.model.predict(params, end=124, exog=X[100:])
## exog for out-of-sample and in-sample dynamic
#assert_almost_equal(predict, predict_expected.values, 3)
def test_arima_no_diff():
# issue 736
# smoke test: predict would break if we had an ARIMAResults wrapping an
# ARMA model, so ARIMA(p, 0, q) needs to return an ARMA in __init__.
ar = [1, -.75, .15, .35]
ma = [1, .25, .9]
y = arma_generate_sample(ar, ma, 100)
mod = ARIMA(y, (3, 0, 2))
assert_(type(mod) is ARMA)
res = mod.fit(disp=-1)
# smoke test just to be sure
res.predict()
def test_arima_predict_noma():
# issue 657
# smoke test
ar = [1, .75]
ma = [1]
data = arma_generate_sample(ar, ma, 100)
arma = ARMA(data, order=(0,1))
arma_res = arma.fit(disp=-1)
arma_res.forecast(1)
def test_arimax():
dta = load_macrodata_pandas().data
dates = dates_from_range("1959Q1", length=len(dta))
dta.index = cpi_dates
dta = dta[["realdpi", "m1", "realgdp"]]
y = dta.pop("realdpi")
# 1 exog
#X = dta.ix[1:]["m1"]
#res = ARIMA(y, (2, 1, 1), X).fit(disp=-1)
#params = [23.902305009084373, 0.024650911502790, -0.162140641341602,
# 0.165262136028113, -0.066667022903974]
#assert_almost_equal(res.params.values, params, 6)
# 2 exog
X = dta
res = ARIMA(y, (2, 1, 1), X).fit(disp=False, solver="nm", maxiter=1000,
ftol=1e-12, xtol=1e-12)
# from gretl
#params = [13.113976653926638, -0.003792125069387, 0.004123504809217,
# -0.199213760940898, 0.151563643588008, -0.033088661096699]
# from stata using double
stata_llf = -1076.108614859121
params = [13.1259220104, -0.00376814509403812, 0.00411970083135622,
-0.19921477896158524, 0.15154396192855729, -0.03308400760360837]
# we can get close
assert_almost_equal(res.params.values, params, 4)
# This shows that it's an optimizer problem and not a problem in the code
assert_almost_equal(res.model.loglike(np.array(params)), stata_llf, 6)
X = dta.diff()
X.iloc[0] = 0
res = ARIMA(y, (2, 1, 1), X).fit(disp=False)
# gretl won't estimate this - looks like maybe a bug on their part,
# but we do just fine; we're close to Stata's answer
# from Stata
params = [19.5656863783347, 0.32653841355833396198,
0.36286527042965188716, -1.01133792126884,
-0.15722368379307766206, 0.69359822544092153418]
assert_almost_equal(res.params.values, params, 3)
def test_bad_start_params():
endog = np.array([820.69093, 781.0103028, 785.8786988, 767.64282267,
778.9837648 , 824.6595702 , 813.01877867, 751.65598567,
753.431091 , 746.920813 , 795.6201904 , 772.65732833,
793.4486454 , 868.8457766 , 823.07226547, 783.09067747,
791.50723847, 770.93086347, 835.34157333, 810.64147947,
738.36071367, 776.49038513, 822.93272333, 815.26461227,
773.70552987, 777.3726522 , 811.83444853, 840.95489133,
777.51031933, 745.90077307, 806.95113093, 805.77521973,
756.70927733, 749.89091773, 1694.2266924 , 2398.4802244 ,
1434.6728516 , 909.73940427, 929.01291907, 769.07561453,
801.1112548 , 796.16163313, 817.2496376 , 857.73046447,
838.849345 , 761.92338873, 731.7842242 , 770.4641844 ])
mod = ARMA(endog, (15, 0))
assert_raises(ValueError, mod.fit)
inv = load_macrodata().data['realinv']
arima_mod = ARIMA(np.log(inv), (1,1,2))
assert_raises(ValueError, arima_mod.fit)
def test_arima_small_data_bug():
# Issue 1038, too few observations with given order
from datetime import datetime
import statsmodels.api as sm
vals = [96.2, 98.3, 99.1, 95.5, 94.0, 87.1, 87.9, 86.7402777504474]
dr = dates_from_range("1990q1", length=len(vals))
ts = pandas.TimeSeries(vals, index=dr)
df = pandas.DataFrame(ts)
mod = sm.tsa.ARIMA(df, (2, 0, 2))
assert_raises(ValueError, mod.fit)
def test_arima_dataframe_integer_name():
# Smoke Test for Issue 1038
from datetime import datetime
import statsmodels.api as sm
vals = [96.2, 98.3, 99.1, 95.5, 94.0, 87.1, 87.9, 86.7402777504474,
94.0, 96.5, 93.3, 97.5, 96.3, 92.]
dr = dates_from_range("1990q1", length=len(vals))
ts = pandas.TimeSeries(vals, index=dr)
df = pandas.DataFrame(ts)
mod = sm.tsa.ARIMA(df, (2, 0, 2))
def test_arima_exog_predict_1d():
# test 1067
np.random.seed(12345)
y = np.random.random(100)
x = np.random.random(100)
mod = ARMA(y, (2, 1), x).fit(disp=-1)
newx = np.random.random(10)
results = mod.forecast(steps=10, alpha=0.05, exog=newx)
def test_arima_1123():
# test ARMAX predict when trend is none
np.random.seed(12345)
arparams = np.array([.75, -.25])
maparams = np.array([.65, .35])
arparam = np.r_[1, -arparams]
maparam = np.r_[1, maparams]
nobs = 20
dates = dates_from_range('1980',length=nobs)
y = arma_generate_sample(arparams, maparams, nobs)
X = np.random.randn(nobs)
y += 5*X
mod = ARMA(y[:-1], order=(1,0), exog=X[:-1])
res = mod.fit(trend='nc', disp=False)
fc = res.forecast(exog=X[-1:])
# results from gretl
assert_almost_equal(fc[0], 2.200393, 6)
assert_almost_equal(fc[1], 1.030743, 6)
assert_almost_equal(fc[2][0,0], 0.180175, 6)
assert_almost_equal(fc[2][0,1], 4.220611, 6)
mod = ARMA(y[:-1], order=(1,1), exog=X[:-1])
res = mod.fit(trend='nc', disp=False)
fc = res.forecast(exog=X[-1:])
assert_almost_equal(fc[0], 2.765688, 6)
assert_almost_equal(fc[1], 0.835048, 6)
assert_almost_equal(fc[2][0,0], 1.129023, 6)
assert_almost_equal(fc[2][0,1], 4.402353, 6)
# make sure this works too; the code looked fishy.
mod = ARMA(y[:-1], order=(1,0), exog=X[:-1])
res = mod.fit(trend='c', disp=False)
fc = res.forecast(exog=X[-1:])
assert_almost_equal(fc[0], 2.481219, 6)
assert_almost_equal(fc[1], 0.968759, 6)
assert_almost_equal(fc[2][0], [0.582485, 4.379952], 6)
def test_small_data():
# 1146
y = [-1214.360173, -1848.209905, -2100.918158, -3647.483678, -4711.186773]
# refuse to estimate these
assert_raises(ValueError, ARIMA, y, (2, 0, 3))
assert_raises(ValueError, ARIMA, y, (1, 1, 3))
mod = ARIMA(y, (1, 0, 3))
assert_raises(ValueError, mod.fit, trend="c")
# try to estimate these... leave it up to the user to check for garbage
# and, to be clear, these are garbage parameters.
# X-12-ARIMA will estimate these; gretl refuses to, likely a problem
# in the start params regression.
res = mod.fit(trend="nc", disp=0, start_params=[.1,.1,.1,.1])
mod = ARIMA(y, (1, 0, 2))
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
class TestARMA00(TestCase):
@classmethod
def setup_class(cls):
from statsmodels.datasets.sunspots import load
sunspots = load().data['SUNACTIVITY']
cls.y = y = sunspots
cls.arma_00_model = ARMA(y, order=(0, 0))
cls.arma_00_res = cls.arma_00_model.fit(disp=-1)
def test_parameters(self):
params = self.arma_00_res.params
assert_almost_equal(self.y.mean(), params)
def test_predictions(self):
predictions = self.arma_00_res.predict()
assert_almost_equal(self.y.mean() * np.ones_like(predictions), predictions)
@nottest
def test_information_criteria(self):
# This test is invalid since the ICs differ due to df_model differences
# between OLS and ARIMA
res = self.arma_00_res
y = self.y
ols_res = OLS(y, np.ones_like(y)).fit(disp=-1)
ols_ic = np.array([ols_res.aic, ols_res.bic])
arma_ic = np.array([res.aic, res.bic])
assert_almost_equal(ols_ic, arma_ic, DECIMAL_4)
def test_arma_00_nc(self):
arma_00 = ARMA(self.y, order=(0, 0))
assert_raises(ValueError, arma_00.fit, trend='nc', disp=-1)
def test_css(self):
arma = ARMA(self.y, order=(0, 0))
fit = arma.fit(method='css', disp=-1)
predictions = fit.predict()
assert_almost_equal(self.y.mean() * np.ones_like(predictions), predictions)
def test_arima(self):
yi = np.cumsum(self.y)
arima = ARIMA(yi, order=(0, 1, 0))
fit = arima.fit(disp=-1)
assert_almost_equal(np.diff(yi).mean(), fit.params, DECIMAL_4)
def test_arma_ols(self):
y = self.y
y_lead = y[1:]
y_lag = y[:-1]
T = y_lag.shape[0]
X = np.hstack((np.ones((T,1)), y_lag[:,None]))
ols_res = OLS(y_lead, X).fit()
arma_res = ARMA(y_lead,order=(0,0),exog=y_lag).fit(trend='c', disp=-1)
assert_almost_equal(ols_res.params, arma_res.params)
def test_arma_exog_no_constant(self):
y = self.y
y_lead = y[1:]
y_lag = y[:-1]
X = y_lag[:,None]
ols_res = OLS(y_lead, X).fit()
arma_res = ARMA(y_lead,order=(0,0),exog=y_lag).fit(trend='nc', disp=-1)
assert_almost_equal(ols_res.params, arma_res.params)
pass
def test_arima_dates_startatend():
# bug
np.random.seed(18)
x = pandas.TimeSeries(np.random.random(36),
index=pandas.DatetimeIndex(start='1/1/1990',
periods=36, freq='M'))
res = ARIMA(x, (1, 0, 0)).fit(disp=0)
pred = res.predict(start=len(x), end=len(x))
assert_(pred.index[0] == x.index.shift(1)[-1])
fc = res.forecast()[0]
assert_almost_equal(pred.values[0], fc)
def test_arma_missing():
from statsmodels.base.data import MissingDataError
# bug 1343
y = np.random.random(40)
y[-1] = np.nan
assert_raises(MissingDataError, ARMA, y, (1, 0), missing='raise')
@dec.skipif(not have_matplotlib)
def test_plot_predict():
from statsmodels.datasets.sunspots import load_pandas
dta = load_pandas().data[['SUNACTIVITY']]
dta.index = DatetimeIndex(start='1700', end='2009', freq='A')
res = ARMA(dta, (3, 0)).fit(disp=-1)
fig = res.plot_predict('1990', '2012', dynamic=True, plot_insample=False)
plt.close(fig)
res = ARIMA(dta, (3, 1, 0)).fit(disp=-1)
fig = res.plot_predict('1990', '2012', dynamic=True, plot_insample=False)
plt.close(fig)
def test_arima_diff2():
dta = load_macrodata_pandas().data['cpi']
dates = dates_from_range("1959Q1", length=len(dta))
dta.index = cpi_dates
mod = ARIMA(dta, (3, 2, 1)).fit(disp=-1)
fc, fcerr, conf_int = mod.forecast(10)
# forecasts from gretl
conf_int_res = [ (216.139, 219.231),
(216.472, 221.520),
(217.064, 223.649),
(217.586, 225.727),
(218.119, 227.770),
(218.703, 229.784),
(219.306, 231.777),
(219.924, 233.759),
(220.559, 235.735),
(221.206, 237.709)]
fc_res = [217.685, 218.996, 220.356, 221.656, 222.945, 224.243, 225.541,
226.841, 228.147, 229.457]
fcerr_res = [0.7888, 1.2878, 1.6798, 2.0768, 2.4620, 2.8269, 3.1816,
3.52950, 3.8715, 4.2099]
assert_almost_equal(fc, fc_res, 3)
assert_almost_equal(fcerr, fcerr_res, 3)
assert_almost_equal(conf_int, conf_int_res, 3)
predicted = mod.predict('2008Q1', '2012Q1', typ='levels')
predicted_res = [214.464, 215.478, 221.277, 217.453, 212.419, 213.530,
215.087, 217.685 , 218.996 , 220.356 , 221.656 ,
222.945 , 224.243 , 225.541 , 226.841 , 228.147 ,
229.457]
assert_almost_equal(predicted, predicted_res, 3)
def test_arima111_predict_exog_2127():
# regression test for issue #2127
ef = [ 0.03005, 0.03917, 0.02828, 0.03644, 0.03379, 0.02744,
0.03343, 0.02621, 0.0305 , 0.02455, 0.03261, 0.03507,
0.02734, 0.05373, 0.02677, 0.03443, 0.03331, 0.02741,
0.03709, 0.02113, 0.03343, 0.02011, 0.03675, 0.03077,
0.02201, 0.04844, 0.05518, 0.03765, 0.05433, 0.03049,
0.04829, 0.02936, 0.04421, 0.02457, 0.04007, 0.03009,
0.04504, 0.05041, 0.03651, 0.02719, 0.04383, 0.02887,
0.0344 , 0.03348, 0.02364, 0.03496, 0.02549, 0.03284,
0.03523, 0.02579, 0.0308 , 0.01784, 0.03237, 0.02078,
0.03508, 0.03062, 0.02006, 0.02341, 0.02223, 0.03145,
0.03081, 0.0252 , 0.02683, 0.0172 , 0.02225, 0.01579,
0.02237, 0.02295, 0.0183 , 0.02356, 0.02051, 0.02932,
0.03025, 0.0239 , 0.02635, 0.01863, 0.02994, 0.01762,
0.02837, 0.02421, 0.01951, 0.02149, 0.02079, 0.02528,
0.02575, 0.01634, 0.02563, 0.01719, 0.02915, 0.01724,
0.02804, 0.0275 , 0.02099, 0.02522, 0.02422, 0.03254,
0.02095, 0.03241, 0.01867, 0.03998, 0.02212, 0.03034,
0.03419, 0.01866, 0.02623, 0.02052]
ue = [ 4.9, 5. , 5. , 5. , 4.9, 4.7, 4.8, 4.7, 4.7,
4.6, 4.6, 4.7, 4.7, 4.5, 4.4, 4.5, 4.4, 4.6,
4.5, 4.4, 4.5, 4.4, 4.6, 4.7, 4.6, 4.7, 4.7,
4.7, 5. , 5. , 4.9, 5.1, 5. , 5.4, 5.6, 5.8,
6.1, 6.1, 6.5, 6.8, 7.3, 7.8, 8.3, 8.7, 9. ,
9.4, 9.5, 9.5, 9.6, 9.8, 10. , 9.9, 9.9, 9.7,
9.8, 9.9, 9.9, 9.6, 9.4, 9.5, 9.5, 9.5, 9.5,
9.8, 9.4, 9.1, 9. , 9. , 9.1, 9. , 9.1, 9. ,
9. , 9. , 8.8, 8.6, 8.5, 8.2, 8.3, 8.2, 8.2,
8.2, 8.2, 8.2, 8.1, 7.8, 7.8, 7.8, 7.9, 7.9,
7.7, 7.5, 7.5, 7.5, 7.5, 7.3, 7.2, 7.2, 7.2,
7. , 6.7, 6.6, 6.7, 6.7, 6.3, 6.3]
# rescaling results in convergence failure
#model = sm.tsa.ARIMA(np.array(ef)*100, (1,1,1), exog=ue)
model = ARIMA(ef, (1,1,1), exog=ue)
res = model.fit(transparams=False, iprint=0, disp=0)
predicts = res.predict(start=len(ef), end = len(ef)+10,
exog=ue[-11:], typ = 'levels')
# regression test, not verified numbers
# if exog=ue in predict, which values are used ?
predicts_res = np.array(
[ 0.02612291, 0.02361929, 0.024966 , 0.02448193, 0.0248772 ,
0.0248762 , 0.02506319, 0.02516542, 0.02531214, 0.02544654,
0.02559099, 0.02550931])
# if exog=ue[-11:] in predict
predicts_res = np.array(
[ 0.02591112, 0.02321336, 0.02436593, 0.02368773, 0.02389767,
0.02372018, 0.02374833, 0.02367407, 0.0236443 , 0.02362868,
0.02362312])
assert_allclose(predicts, predicts_res, atol=1e-6)
def test_ARIMA_exog_predict():
# test forecasting and dynamic prediction with exog against Stata
dta = load_macrodata_pandas().data
dates = dates_from_range("1959Q1", length=len(dta))
cpi_dates = dates_from_range('1959Q1', '2009Q3')
dta.index = cpi_dates
data = dta
data['loginv'] = np.log(data['realinv'])
data['loggdp'] = np.log(data['realgdp'])
data['logcons'] = np.log(data['realcons'])
forecast_period = dates_from_range('2008Q2', '2009Q3')
end = forecast_period[0]
data_sample = data.ix[dta.index < end]
exog_full = data[['loggdp', 'logcons']]
# pandas
mod = ARIMA(data_sample['loginv'], (1,0,1), exog=data_sample[['loggdp', 'logcons']])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=0, solver='bfgs', maxiter=5000)
predicted_arma_fp = res.predict(start=197, end=202, exog=exog_full.values[197:]).values
predicted_arma_dp = res.predict(start=193, end=202, exog=exog_full[197:], dynamic=True)
# numpy
mod2 = ARIMA(np.asarray(data_sample['loginv']), (1,0,1),
exog=np.asarray(data_sample[['loggdp', 'logcons']]))
res2 = mod2.fit(start_params=res.params, disp=0, solver='bfgs', maxiter=5000)
exog_full = data[['loggdp', 'logcons']]
predicted_arma_f = res2.predict(start=197, end=202, exog=exog_full.values[197:])
predicted_arma_d = res2.predict(start=193, end=202, exog=exog_full[197:], dynamic=True)
#ARIMA(1, 1, 1)
ex = np.asarray(data_sample[['loggdp', 'logcons']].diff())
# The first observation is not (supposed to be) used, but I get a LAPACK problem
# Intel MKL ERROR: Parameter 5 was incorrect on entry to DLASCL.
ex[0] = 0
mod111 = ARIMA(np.asarray(data_sample['loginv']), (1,1,1),
# Stata also differences the exog
exog=ex)
res111 = mod111.fit(disp=0, solver='bfgs', maxiter=5000)
exog_full_d = data[['loggdp', 'logcons']].diff()
res111.predict(start=197, end=202, exog=exog_full_d.values[197:])
predicted_arima_f = res111.predict(start=196, end=202, exog=exog_full_d.values[197:], typ='levels')
predicted_arima_d = res111.predict(start=193, end=202, exog=exog_full_d.values[197:], typ='levels', dynamic=True)
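# reference values from Stata (see the comment at the top of this test):
# res_f* hold the non-dynamic predictions and res_d* the dynamic ones; the
# 101/111/002 suffixes name the (p,d,q) orders fit above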
res_f101 = np.array([ 7.73975859954, 7.71660108543, 7.69808978329, 7.70872117504,
7.6518392758 , 7.69784279784, 7.70290907856, 7.69237782644,
7.65017785174, 7.66061689028, 7.65980022857, 7.61505314129,
7.51697158428, 7.5165760663 , 7.5271053284 ])
res_f111 = np.array([ 7.74460013693, 7.71958207517, 7.69629561172, 7.71208186737,
7.65758850178, 7.69223472572, 7.70411775588, 7.68896109499,
7.64016249001, 7.64871881901, 7.62550283402, 7.55814609462,
7.44431310053, 7.42963968062, 7.43554675427])
res_d111 = np.array([ 7.74460013693, 7.71958207517, 7.69629561172, 7.71208186737,
7.65758850178, 7.69223472572, 7.71870821151, 7.7299430215 ,
7.71439447355, 7.72544001101, 7.70521902623, 7.64020040524,
7.5281927191 , 7.5149442694 , 7.52196378005])
res_d101 = np.array([ 7.73975859954, 7.71660108543, 7.69808978329, 7.70872117504,
7.6518392758 , 7.69784279784, 7.72522142662, 7.73962377858,
7.73245950636, 7.74935432862, 7.74449584691, 7.69589103679,
7.5941274688 , 7.59021764836, 7.59739267775])
assert_allclose(predicted_arma_dp, res_d101[-len(predicted_arma_d):], atol=1e-4)
assert_allclose(predicted_arma_fp, res_f101[-len(predicted_arma_f):], atol=1e-4)
assert_allclose(predicted_arma_d, res_d101[-len(predicted_arma_d):], atol=1e-4)
assert_allclose(predicted_arma_f, res_f101[-len(predicted_arma_f):], atol=1e-4)
assert_allclose(predicted_arima_d, res_d111[-len(predicted_arima_d):], rtol=1e-4, atol=1e-4)
assert_allclose(predicted_arima_f, res_f111[-len(predicted_arima_f):], rtol=1e-4, atol=1e-4)
# test forecast with 0 AR terms (fix in #2457); numbers again from Stata
res_f002 = np.array([ 7.70178181209, 7.67445481224, 7.6715373765 , 7.6772915319 ,
7.61173201163, 7.67913499878, 7.6727609212 , 7.66275451925,
7.65199799315, 7.65149983741, 7.65554131408, 7.62213286298,
7.53795983357, 7.53626130154, 7.54539963934])
res_d002 = np.array([ 7.70178181209, 7.67445481224, 7.6715373765 , 7.6772915319 ,
7.61173201163, 7.67913499878, 7.67306697759, 7.65287924998,
7.64904451605, 7.66580449603, 7.66252081172, 7.62213286298,
7.53795983357, 7.53626130154, 7.54539963934])
mod_002 = ARIMA(np.asarray(data_sample['loginv']), (0,0,2),
exog=np.asarray(data_sample[['loggdp', 'logcons']]))
# doesn't converge with default starting values
res_002 = mod_002.fit(start_params=np.concatenate((res.params[[0, 1, 2, 4]], [0])),
disp=0, solver='bfgs', maxiter=5000)
# forecast
fpredict_002 = res_002.predict(start=197, end=202, exog=exog_full.values[197:])
forecast_002 = res_002.forecast(steps=len(exog_full.values[197:]),
exog=exog_full.values[197:])
forecast_002 = forecast_002[0] # TODO we are not checking the other results
assert_allclose(fpredict_002, res_f002[-len(fpredict_002):], rtol=1e-4, atol=1e-6)
assert_allclose(forecast_002, res_f002[-len(forecast_002):], rtol=1e-4, atol=1e-6)
# dynamic predict
dpredict_002 = res_002.predict(start=193, end=202, exog=exog_full.values[197:],
dynamic=True)
assert_allclose(dpredict_002, res_d002[-len(dpredict_002):], rtol=1e-4, atol=1e-6)
def test_arima_fit_multiple_calls():
y = [-1214.360173, -1848.209905, -2100.918158, -3647.483678, -4711.186773]
mod = ARIMA(y, (1, 0, 2))
# Make multiple calls to fit
mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
assert_equal(mod.exog_names, ['const', 'ar.L1.y', 'ma.L1.y', 'ma.L2.y'])
mod.fit(disp=0, start_params=[np.mean(y), .1, .1, .1])
assert_equal(mod.exog_names, ['const', 'ar.L1.y', 'ma.L1.y', 'ma.L2.y'])
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb'], exit=False)
|
bsd-3-clause
|
johnmgregoire/JCAPdatavis
|
echem_FCVSsurfaceareacaller.py
|
1
|
4484
|
import numpy, scipy
from matplotlib.ticker import FuncFormatter
import matplotlib.colors as colors
from echem_plate_math import *
import os, time, pickle
import pylab
from echem_plate_fcns import *
from echem_FCVSsurfacearea3 import *
#folder='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe_3V_FCV_4835'
#savefolder='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe_3V_FCV_4835'
folder='C:/Users/Public/Documents/EchemDropRawData/full plate FCV/20130530 NiFeCoCe_plate1_FCV_5577'
savefolder='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe/20130530 NiFeCoCe_plate1_FCV_5577'
#folder='C:/Users/Public/Documents/EchemDropRawData/full plate FCV/20130602 2nd NiFeCoCe_plate2_FCV_5498'
#savefolder='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe/20130602 2nd NiFeCoCe_plate2_FCV_5498'
#folder='C:/Users/Public/Documents/EchemDropRawData/full plate FCV/20130524 NiFeCoCe_plate3_FCV_4835'
#savefolder='C:/Users/Public/Documents/EchemDropAnalyzedData/FCVdata/20130523 NiFeCoCe/20130524 NiFeCoCe_plate3_FCV_4835'
if not os.path.exists(savefolder):
os.mkdir(savefolder)
startpath_fom=os.path.join(savefolder, os.path.split(folder)[1])
fns=os.listdir(folder)
ext='txt'
fns=numpy.array([fn for fn in fns if 'FCVS' in fn and fn.endswith(ext) and fn.startswith('Sample')])
fnstartarr=numpy.array([fn.partition('_')[0] for fn in fns])
fnstartset=set(fnstartarr)
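# group the data files by sample prefix (the text before the first '_') so that
# all FCVS files for a given sample are read and analyzed together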
pathliststoread=[[os.path.join(folder, fn) for fn in fns[numpy.where(fnstartarr==fnstart)]] for fnstart in fnstartset]
dlist=[]
for pl in pathliststoread:
saven=os.path.split(pl[0])[1].partition('_')[0]
saven+='_FCVSanalysis.png'
savep=os.path.join(savefolder, saven)
pylab.figure(num=1)
try:
Capac, CurrIntercept, CapacFitR2, d=calccapacitivecurrent(pl, vscanrangefrac=(.15, .85), vendrangefrac=(.02, .98), vendtol=0.01, plotfignum=1, plotsavepath=savep, returndict=True)
except:
pylab.clf()
print 'Failed analysis on ', pl
continue
dtemp={}
for k in ['Sample', 'elements', 'compositions', 'x', 'y', 'vstartinds_seg', 'vlen_seg','dEdt_seg', 'dIdE_seg', 'dEdtmean_cycs', 'delI_cycs', 'CC_dEdtfitpars', 'CC_dEdtfitR2', 'Capac', 'CurrIntercept', 'dIdt_fwdrevratio']:
dtemp[k]=d[k]
# dtemp['Capac']=Capac
# dtemp['CurrIntercept']=CurrIntercept
# dtemp['CapacFitR2']=CapacFitR2
dlist+=[dtemp]
pylab.clf()
#Capac, CurrIntercept=calccapacitivecurrent([p, p2],plotfignum=1)
#pylab.show()
def writefile(p, dlist, savedlist=True, fomkey='FOM'):
if len(dlist)==0:
print 'no data to save'
return
labels=['Sample', 'x(mm)', 'y(mm)']
labels+=dlist[0]['elements']
labels+=[fomkey]
kv_fmt=[('Sample', '%d'), ('x', '%.2f'), ('y', '%.2f'), ('compositions', '%.4f'), (fomkey, '%.6e')]
arr=[]
for d in dlist:
arr2=[]
for k, fmt in kv_fmt:
v=d[k]
if isinstance(v, numpy.ndarray) or isinstance(v, list):
for subv in v:
arr2+=[fmt %subv]
else:
arr2+=[fmt %v]
arr+=['\t'.join(arr2)]
s='\t'.join(labels)+'\n'
s+='\n'.join(arr)
f=open(p, mode='w')
f.write(s)
f.close()
if savedlist:
f=open(p[:-4]+'_dlist.pck', mode='w')
pickle.dump(dlist, f)
f.close()
for fomkey in ['Capac', 'CurrIntercept', 'CC_dEdtfitR2', 'dIdt_fwdrevratio']:
p=startpath_fom+'_'+fomkey+'.txt'
#p=p[::-1].replace('plate'[::-1], 'plate1'[::-1], 1)[::-1]#temporary fix for file naming for stacked_tern4
writefile(p, dlist, savedlist=(fomkey=='Capac'), fomkey=fomkey)
dlist=[d for d in dlist if d['CC_dEdtfitR2']>.8]
for fomkey in ['Capac', 'CurrIntercept', 'CC_dEdtfitR2', 'dIdt_fwdrevratio']:
p=startpath_fom+'_'+fomkey+'_filterR2.txt'
#p=p[::-1].replace('plate'[::-1], 'plate1'[::-1], 1)[::-1]#temporary fix for file naming for stacked_tern4
writefile(p, dlist, savedlist=(fomkey=='Capac'), fomkey=fomkey)
dlist=[d for d in dlist if (d['dIdt_fwdrevratio']>.8)&(d['dIdt_fwdrevratio']<1.25)] #lets the fwd:rev be between 4:5 and 5:4
for fomkey in ['Capac', 'CurrIntercept', 'CC_dEdtfitR2', 'dIdt_fwdrevratio']:
p=startpath_fom+'_'+fomkey+'_filterR2fwdrevratio.txt'
#p=p[::-1].replace('plate'[::-1], 'plate1'[::-1], 1)[::-1]#temporary fix for file naming for stacked_tern4
writefile(p, dlist, savedlist=(fomkey=='Capac'), fomkey=fomkey)
|
bsd-3-clause
|
lmallin/coverage_test
|
python_venv/lib/python2.7/site-packages/pandas/tests/io/msgpack/test_format.py
|
25
|
2882
|
# coding: utf-8
from pandas.io.msgpack import unpackb
def check(src, should, use_list=0):
assert unpackb(src, use_list=use_list) == should
def testSimpleValue():
check(b"\x93\xc0\xc2\xc3", (None, False, True, ))
def testFixnum():
check(b"\x92\x93\x00\x40\x7f\x93\xe0\xf0\xff", ((0,
64,
127, ),
(-32,
-16,
-1, ), ))
def testFixArray():
check(b"\x92\x90\x91\x91\xc0", ((), ((None, ), ), ), )
def testFixRaw():
check(b"\x94\xa0\xa1a\xa2bc\xa3def", (b"", b"a", b"bc", b"def", ), )
def testFixMap():
check(b"\x82\xc2\x81\xc0\xc0\xc3\x81\xc0\x80",
{False: {None: None},
True: {None: {}}}, )
def testUnsignedInt():
check(b"\x99\xcc\x00\xcc\x80\xcc\xff\xcd\x00\x00\xcd\x80\x00"
b"\xcd\xff\xff\xce\x00\x00\x00\x00\xce\x80\x00\x00\x00"
b"\xce\xff\xff\xff\xff",
(0,
128,
255,
0,
32768,
65535,
0,
2147483648,
4294967295, ), )
def testSignedInt():
check(b"\x99\xd0\x00\xd0\x80\xd0\xff\xd1\x00\x00\xd1\x80\x00"
b"\xd1\xff\xff\xd2\x00\x00\x00\x00\xd2\x80\x00\x00\x00"
b"\xd2\xff\xff\xff\xff", (0,
-128,
-1,
0,
-32768,
-1,
0,
-2147483648,
-1, ))
def testRaw():
check(b"\x96\xda\x00\x00\xda\x00\x01a\xda\x00\x02ab\xdb\x00\x00"
b"\x00\x00\xdb\x00\x00\x00\x01a\xdb\x00\x00\x00\x02ab",
(b"", b"a", b"ab", b"", b"a", b"ab"))
def testArray():
check(b"\x96\xdc\x00\x00\xdc\x00\x01\xc0\xdc\x00\x02\xc2\xc3\xdd\x00"
b"\x00\x00\x00\xdd\x00\x00\x00\x01\xc0\xdd\x00\x00\x00\x02"
b"\xc2\xc3", ((), (None, ), (False, True), (), (None, ),
(False, True)))
def testMap():
check(b"\x96"
b"\xde\x00\x00"
b"\xde\x00\x01\xc0\xc2"
b"\xde\x00\x02\xc0\xc2\xc3\xc2"
b"\xdf\x00\x00\x00\x00"
b"\xdf\x00\x00\x00\x01\xc0\xc2"
b"\xdf\x00\x00\x00\x02\xc0\xc2\xc3\xc2", ({}, {None: False},
{True: False,
None: False}, {},
{None: False},
{True: False,
None: False}))
|
mit
|
YuepengGuo/zipline
|
tests/test_rolling_panel.py
|
12
|
7118
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import MutableIndexRollingPanel, RollingPanel
from zipline.finance.trading import TradingEnvironment
class TestRollingPanel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@classmethod
def tearDownClass(cls):
del cls.env
def test_alignment(self):
items = ('a', 'b')
sids = (1, 2)
dts = self.env.market_minute_window(
self.env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts[2:],
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
rp.extend_back(dts[:-2])
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts,
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
def test_get_current_multiple_call_same_tick(self):
"""
In the old get_current, each call to get_current would copy the data, so
changing the returned object had no side effects.
To keep the same API, make sure that the raw option returns a copy too.
"""
def data_id(values):
return values.__array_interface__['data']
items = ('a', 'b')
sids = (1, 2)
dts = self.env.market_minute_window(
self.env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
# each get_current call makes a copy
cur = rp.get_current()
cur2 = rp.get_current()
assert data_id(cur.values) != data_id(cur2.values)
# make sure raw follows the same logic
raw = rp.get_current(raw=True)
raw2 = rp.get_current(raw=True)
assert data_id(raw) != data_id(raw2)
class TestMutableIndexRollingPanel(unittest.TestCase):
def test_basics(self, window=10):
items = ['bar', 'baz', 'foo']
minor = ['A', 'B', 'C', 'D']
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque(maxlen=window)
frames = {}
for i, date in enumerate(dates):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def test_adding_and_dropping_items(self, n_items=5, n_minor=10, window=10,
periods=30):
np.random.seed(123)
items = deque(range(n_items))
minor = deque(range(n_minor))
expected_items = deque(range(n_items))
expected_minor = deque(range(n_minor))
first_non_existant = max(n_items, n_minor) + 1
# We want to add new columns in random order
add_items = np.arange(first_non_existant, first_non_existant + periods)
np.random.shuffle(add_items)
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
expected_frames = deque(maxlen=window)
expected_dates = deque()
for i, (date, add_item) in enumerate(zip(dates, add_items)):
frame = pd.DataFrame(np.random.randn(n_items, n_minor),
index=items, columns=minor)
if i >= window:
# Old labels and dates should start to get dropped at every
# call
del frames[expected_dates.popleft()]
expected_minor.popleft()
expected_items.popleft()
expected_frames.append(frame)
expected_dates.append(date)
rp.add_frame(date, frame)
frames[date] = frame
result = rp.get_current()
np.testing.assert_array_equal(sorted(result.minor_axis.values),
sorted(expected_minor))
np.testing.assert_array_equal(sorted(result.items.values),
sorted(expected_items))
tm.assert_frame_equal(frame.T,
result.ix[frame.index, -1, frame.columns])
expected_result = pd.Panel(frames).swapaxes(0, 1)
tm.assert_panel_equal(expected_result,
result)
# Insert new items
minor.popleft()
minor.append(add_item)
items.popleft()
items.append(add_item)
expected_minor.append(add_item)
expected_items.append(add_item)
|
apache-2.0
|
mbway/Bayesian-Optimisation
|
old_library/plot.py
|
1
|
42108
|
#!/usr/bin/env python3
'''
Plotting methods for Optimiser objects (extracted from the main class
definitions because these methods are long and just add noise)
'''
# python 2 compatibility
from __future__ import (absolute_import, division, print_function, unicode_literals)
from .py2 import *
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
import matplotlib.transforms
from collections import defaultdict
from itertools import groupby
# local modules
from .utils import *
from .bayesian_utils import *
from . import plot3D
# note: Mixins have to be inherited in reverse, eg MyClass(Plotting, SuperClass)
class Norm(mpl.colors.Normalize):
'''
Useful to warp the colormap so that more of the available colors are
used on the range of interesting data.
'''
# need to implement __call__() and optionally levels()
def levels(self):
'''
returns: a numpy array for the values where the boundaries between
the colors should be placed
'''
return None
class MidpointNorm(Norm):
'''
Half of the color map is used for values which fall below the midpoint,
and half are used for values which fall above.
This can be used to draw attention to smaller differences at the extreme
ends of the observed values.
based on:
- http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
- https://matplotlib.org/users/colormapnorms.html
'''
def __init__(self, vmin, vmax, midpoint, res=100, clip=False):
'''
midpoint: the value to 'center' around
res: the 'resolution' ie number of distinct levels in the colorbar
'''
super().__init__(vmin, vmax, clip)
self.midpoint = midpoint
self.res = res
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def levels(self):
return np.concatenate((
np.linspace(self.vmin, self.midpoint, num=self.res/2, endpoint=False),
np.linspace(self.midpoint, self.vmax, num=self.res/2)))
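# --- Editor's illustrative sketch (not part of the original library) ---
# A minimal example of how MidpointNorm might be wired into a heatmap so that
# more colour resolution is spent around a value of interest. The data and the
# midpoint below are invented purely for demonstration; np and plt come from
# this module's imports.
def _example_midpoint_heatmap():
    data = np.linspace(0.0, 10.0, 100).reshape(10, 10)
    norm = MidpointNorm(vmin=0.0, vmax=10.0, midpoint=2.0, res=100)
    fig, ax = plt.subplots()
    im = ax.pcolormesh(data, cmap='viridis', norm=norm)
    # norm.levels() could also be passed as explicit colorbar boundaries
    fig.colorbar(im, ax=ax)
    return fig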
class OptimiserPlotting:
'''
A mixin for providing generic plotting capabilities to an Optimiser object
(applicable to Grid/Random/Bayes).
'''
def group_by_param(self, param_name):
'''
return [value, [sample]] for each unique value of the given parameter
'''
param_key = lambda sample: sample.config[param_name]
data = []
# must be sorted before grouping
for val, samples in groupby(sorted(self.samples, key=param_key), param_key):
data.append((val, list(samples)))
return data
def group_by_params(self, param_a, param_b):
'''
return [(value_a, value_b), [sample]] for each unique pair of values of the given parameters
'''
params_key = lambda sample: (sample.config[param_a], sample.config[param_b])
data = []
# must be sorted before grouping
for val, samples in groupby(sorted(self.samples, key=params_key), params_key):
data.append((val, list(samples)))
return data
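# Editor's note (illustrative only): for a parameter 'x' sampled at the values
# 1 and 2, group_by_param('x') returns something shaped like
#     [(1, [sample_a, sample_c]), (2, [sample_b])]
# and group_by_params pairs two parameters the same way, keyed by value tuples.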
def plot_error_over_time(self, true_best, log_scale=True):
'''
plot a line graph showing the difference between the known optimal value
and the optimiser's best guess at each step.
true_best: the globally optimal value to compare to
log_scale: whether to plot on a logarithmic or a linear scale
'''
fig, ax = plt.subplots(figsize=(16, 10)) # inches
xs = range(1, len(self.samples)+1)
costs = [(true_best - s.cost if self.maximise_cost
else s.cost - true_best) for s in self.samples]
ax.plot(xs, costs, marker='o', markersize=4, color='#4c72b0', label='error')
ax.set_title('Error Over Time', fontsize=14)
ax.set_xlabel('samples')
ax.set_ylabel('cost')
if log_scale:
ax.set_yscale('log')
ax.margins(0.0, 0.15)
if len(self.samples) < 50:
ax.xaxis.set_major_locator(ticker.MultipleLocator(2.0))
elif len(self.samples) < 100:
ax.xaxis.set_major_locator(ticker.MultipleLocator(5.0))
ax.legend()
def plot_cost_over_time(self, plot_each=True, plot_best=True, true_best=None):
'''
plot a line graph showing the progress that the optimiser makes towards
the optimum as the number of samples increases.
plot_each: plot the cost of each sample
plot_best: plot the running-best cost
true_best: if available: plot a horizontal line for the best possible cost
'''
fig, ax = plt.subplots(figsize=(16, 10)) # inches
xs = range(1, len(self.samples)+1)
costs = [s.cost for s in self.samples]
if true_best is not None:
ax.axhline(true_best, color='black', label='true best')
if plot_best:
chooser = max if self.maximise_cost else min
best_cost = [chooser(costs[:x]) for x in xs]
ax.plot(xs, best_cost, color='#55a868')
best_x, best_cost = chooser(zip(xs, costs), key=lambda t: t[1])
# move the marker out of the way.
offset = 4.5 if self.maximise_cost else -4.5 # pt
offset = matplotlib.transforms.ScaledTranslation(0, offset/fig.dpi,
fig.dpi_scale_trans)
trans = ax.transData + offset
marker = 'v' if self.maximise_cost else '^'
ax.plot(best_x, best_cost, marker=marker, color='#55a868',
markersize=10, zorder=10, markeredgecolor='black',
markeredgewidth=1, label='best cost', transform=trans)
if plot_each:
ax.plot(xs, costs, marker='o', markersize=4, color='#4c72b0', label='cost')
ax.set_title('Cost Over Time', fontsize=14)
ax.set_xlabel('samples')
ax.set_ylabel('cost')
ax.margins(0.0, 0.15)
if len(self.samples) < 50:
ax.xaxis.set_major_locator(ticker.MultipleLocator(2.0))
elif len(self.samples) < 100:
ax.xaxis.set_major_locator(ticker.MultipleLocator(5.0))
ax.legend()
return fig
def plot_param(self, param_name, plot_boxplot=True, plot_samples=True,
plot_means=True, log_axes=(False, False)):
'''
plot a boxplot of parameter values against cost
plot_boxplot: whether to plot boxplots
plot_samples: whether to plot each sample as a point
plot_means: whether to plot a line through the mean costs
log_axes: (xaxis,yaxis) whether to display the axes with a logarithmic scale
'''
values = []
costs = []
means = []
for val, samples in self.group_by_param(param_name):
values.append(val)
c = [s.cost for s in samples]
costs.append(c)
means.append(np.mean(c))
labels = ['{:.2g}'.format(v) for v in values]
plt.figure(figsize=(16, 8))
if plot_means:
plt.plot(values, means, 'r-', linewidth=1, alpha=0.5)
if plot_samples:
xs, ys = zip(*[(s.config[param_name], s.cost) for s in self.samples])
plt.plot(xs, ys, 'go', markersize=5, alpha=0.6)
if plot_boxplot:
plt.boxplot(costs, positions=values, labels=labels)
plt.margins(0.1, 0.1)
plt.xlabel('parameter: ' + param_name)
plt.ylabel('cost')
if log_axes[0]:
plt.xscale('log')
if log_axes[1]:
plt.yscale('log')
plt.autoscale(True)
plt.show()
#TODO: instead of 'interactive' pass an argument of how many points to show, then deal with the slider business outside of optimisation.py and plot3D.py
def scatter_plot(self, param_a, param_b, interactive=True, color_by='cost',
log_axes=(False, False, False)):
'''
interactive: whether to display a slider for changing the number of
samples to display
color_by: either 'cost' or 'age'
log_axes: whether to display the x,y,z axes with a logarithmic scale
'''
assert color_by in ['cost', 'age']
xs, ys, costs, texts = [], [], [], []
for i, s in enumerate(self.samples):
xs.append(s.config[param_a])
ys.append(s.config[param_b])
costs.append(s.cost)
texts.append('sample {:03}, config: {}, cost: {}'.format(i+1, config_string(s.config), s.cost))
xs, ys, costs, texts = map(np.array, (xs, ys, costs, texts))
color = 'z' if color_by == 'cost' else 'age'
axes_names = ['param: ' + param_a, 'param: ' + param_b, 'cost']
plot3D.scatter3D(xs, ys, costs, interactive=interactive, color_by=color,
markersize=4, tooltips=texts, axes_names=axes_names,
log_axes=log_axes)
def surface_plot(self, param_a, param_b, log_axes=(False, False, False)):
'''
plot the surface of different values of param_a and param_b and how they
affect the cost (z-axis). If there are multiple configurations with the
same combination of param_a,param_b values then the minimum is taken for
the z/cost value.
This method does not require that in self.samples there is complete
coverage of all param_a and param_b values, or that the samples have a
particular ordering.
If there are gaps where a param_a,param_b combination has not yet been
evaluated, the cost for that point will be 0.
log_axes: whether to display the x,y,z axes with a logarithmic scale
'''
# get all the x and y values found in any of the samples (may not equal self.ranges[...])
xs = np.array(sorted(set([val for val, samples in self.group_by_param(param_a)])))
ys = np.array(sorted(set([val for val, samples in self.group_by_param(param_b)])))
costs = defaultdict(float) # if not all combinations of x and y are available: cost = 0
texts = defaultdict(lambda: 'no data')
for val, samples_for_val in self.group_by_params(param_a, param_b):
sample = min(samples_for_val, key=lambda s: s.cost)
costs[val] = sample.cost
texts[val] = 'config: {}, cost: {}'.format(config_string(sample.config), sample.cost)
xs, ys = np.meshgrid(xs, ys)
costs = np.vectorize(lambda x, y: costs[(x, y)])(xs, ys)
texts = np.vectorize(lambda x, y: texts[(x, y)])(xs, ys)
axes_names = ['param: ' + param_a, 'param: ' + param_b, 'cost']
plot3D.surface3D(xs, ys, costs, tooltips=texts, axes_names=axes_names, log_axes=log_axes)
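# Editor's note (illustrative only): surface_plot above builds a dense grid
# even from sparse samples. Conceptually:
#     costs = defaultdict(float)              # unevaluated (x, y) pairs -> 0
#     costs[(x, y)] = min(cost of samples at (x, y))
#     Z = np.vectorize(lambda x, y: costs[(x, y)])(X, Y)
# so any hole in the sampled grid simply shows up as a zero-cost cell.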
class BayesianOptimisationOptimiserPlotting:
'''
a mixin for providing Bayesian Optimisation specific plotting capabilities
to a BayesianOptimisationOptimiser
'''
def _points_vary_one(self, point, param, xs):
'''
points generated by fixing all but one parameter to match the given
config, and varying the remaining parameter.
point: the configuration represented in point space to base the points on
param: the name of the parameter to vary
xs: the values to provide in place of config[param]
'''
assert len(xs.shape) == 1 # not 2D
# create many duplicates of the point version of the given configuration
points = np.repeat(point, len(xs), axis=0)
param_index = self.point_space.param_indices[param] # column to swap out for xs
points[:,param_index] = xs
return points
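# Editor's note (illustrative only): _points_vary_one is just "repeat the row,
# then overwrite one column". With point = [[a, b, c]] and xs = [x1, x2, x3],
# varying the parameter stored in column 1 yields
#     [[a, x1, c],
#      [a, x2, c],
#      [a, x3, c]]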
def plot_step_1D(self, param, step, true_cost=None, n_sigma=2, sur_through_all=True):
'''
plot a Bayesian optimisation step, perturbed along a single parameter.
the intuition for the case of a 1D configuration space is trivial: the
plot is simply the parameter value and the corresponding cost and
acquisition values. in 2D, imagine the surface plot of the two
parameters against cost (as the height). This plot takes a cross section
of that surface along the specified axis and passing through the point
of the next configuration to test to show how the acquisition function
varies along that dimension. The same holds for higher dimensions but
is harder to visualise.
param: the name of the parameter to perturb to obtain the graph
step: the number of the step to plot
true_cost: true cost function (or array of pre-computed cost values
corresponding to self.ranges[param]) (None to omit)
n_sigma: the number of standard deviations from the mean to plot the
uncertainty confidence interval.
Note 1=>68%, 2=>95%, 3=>99% (for a normal distribution, which this is)
sur_through_all: whether to plot a surrogate prediction through every
sample or just through the location of the next point to be chosen
'''
assert step in self.step_log, 'step not recorded in the log'
range_ = self.point_space.ranges[param]
assert range_.type_ in [RangeType.Linear, RangeType.Logarithmic]
s = self.step_log[step]
num_suggestions = len(s.suggestions)
all_xs = range_.values
# cases for different types of steps. Only certain cases are accounted
# for, because only these cases should happen under normal circumstances.
if not (num_suggestions == 1 or num_suggestions == 2):
raise ValueError(num_suggestions)
# extract and process the data from the first suggestion
sg1 = s.suggestions[0]
if isinstance(sg1, Step.MaxAcqSuggestion):
# combination of the concrete samples (sx, sy) and the hypothesised samples
# (hx, hy) if there are any
xs = np.vstack((s.sx, s.hx))
ys = np.vstack((s.sy, sg1.hy))
# restore from just the hyperparameters to a surrogate model which can be queried
sur = self.Surrogate(self)
sur.fit(xs, ys, hyper_params=sg1.sur)
acq_fun = self._get_acq_fun(sur, ys)
sims = False # no simulations for this strategy
# the surrogate model is trained in point space
ps_xs = self.point_space.param_to_point_space(all_xs, param)
# the values for the chosen parameter for the concrete and
# hypothesised samples in config space.
concrete_xs = self.point_space.param_to_config_space(s.sx, param).flatten()
concrete_ys = s.sy
hypothesised_xs = self.point_space.param_to_config_space(s.hx, param).flatten()
hypothesised_ys = sg1.hy
# the current best concrete sample (x is only the value along the
# chosen parameter in config space)
best_i = np.argmax(concrete_ys) if self.maximise_cost else np.argmin(concrete_ys)
best_concrete_x = concrete_xs[best_i]
best_concrete_y = concrete_ys[best_i]
# next point chosen by the acquisition function to be evaluated
acq_chosen_point = sg1.x
acq_chosen_x = self.point_space.param_to_config_space(acq_chosen_point, param)
acq_chosen_ac = sg1.ac
# the final choice of the step, replaced by the random fallback
# choice if there was one
chosen_point = acq_chosen_point
chosen_x = acq_chosen_x
elif isinstance(sg1, Step.MC_MaxAcqSuggestion):
# combination of the concrete samples (sx, sy) and the hypothesised samples
# (hx, hy) if there are any
xs = np.vstack((s.sx, s.hx))
# restore from just the hyperparameters to a surrogate model which can be queried
sur = self.Surrogate(self)
sur.fit(s.sx, s.sy, hyper_params=sg1.sur) # only concrete samples
sims = True # simulations used in this strategy
sim_surs = []
sim_ac_funs = [] # the acquisition functions for each simulation
for hy, sim_params in sg1.simulations:
ys = np.vstack((s.sy, hy))
sim_params = sim_params if sim_params is not None else sg1.sur
# fit the surrogate model to the points of this simulation
sim_sur = self.Surrogate(self)
sim_sur.fit(xs, ys, hyper_params=sim_params)
acq = self._get_acq_fun(sim_sur, ys) # partially apply
sim_ac_funs.append(acq)
sim_surs.append(sim_sur)
# average acquisition across every simulation
acq_fun = lambda xs: 1.0/len(sg1.simulations) * np.sum(acq(xs) for acq in sim_ac_funs)
# the surrogate model is trained in point space
ps_xs = self.point_space.param_to_point_space(all_xs, param)
# the values for the chosen parameter for the concrete and
# hypothesised samples in config space.
concrete_xs = self.point_space.param_to_config_space(s.sx, param).flatten()
concrete_ys = s.sy
hypothesised_xs = self.point_space.param_to_config_space(s.hx, param).flatten()
hypothesised_xs = np.vstack([make2D(hypothesised_xs)] * len(sg1.simulations))
hypothesised_ys = np.vstack([hy for hy, sim_sur in sg1.simulations])
# the current best concrete sample (x is only the value along the
# chosen parameter in config space)
best_i = np.argmax(concrete_ys) if self.maximise_cost else np.argmin(concrete_ys)
best_concrete_x = concrete_xs[best_i]
best_concrete_y = concrete_ys[best_i]
# next point chosen by the acquisition function to be evaluated
acq_chosen_point = sg1.x
acq_chosen_x = self.point_space.param_to_config_space(acq_chosen_point, param)
acq_chosen_ac = sg1.ac
# the final choice of the step, replaced by the random fallback
# choice if there was one
chosen_point = acq_chosen_point
chosen_x = acq_chosen_x
else:
raise NotImplementedError()
# extract and process the data from the second suggestion if there is one
if num_suggestions == 2:
sg2 = s.suggestions[1]
if isinstance(sg2, Step.RandomSuggestion):
random_fallback = True
random_chosen_point = sg2.x
random_chosen_x = self.point_space.param_to_config_space(random_chosen_point, param)
chosen_point = random_chosen_point
chosen_x = random_chosen_x
else:
raise NotImplementedError()
else:
random_fallback = False
if acq_fun:
# it makes sense to plot the acquisition function through the slice
# corresponding to the next point to be chosen, whether it is the
# suggestion by the acquisition function maximisation or the random
# suggestion.
ps_perturbed_points = self._points_vary_one(chosen_point, param, ps_xs)
ac = acq_fun(ps_perturbed_points)
if sims:
sim_ac = [acq(ps_perturbed_points) for acq in sim_ac_funs]
sim_mus = [sim_sur.predict(ps_perturbed_points).flatten() for sim_sur in sim_surs]
fig = plt.figure(figsize=(16, 10)) # inches
grid = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[2, 1])
ax1, ax2 = fig.add_subplot(grid[0]), fig.add_subplot(grid[1])
title = 'Bayesian Optimisation step {}'.format(
step - self.strategy.pre_phase_steps)
if random_fallback:
title += ' (Random Fallback)'
fig.suptitle(title, fontsize=14)
ax1.margins(0.005, 0.05)
ax2.margins(0.005, 0.05)
if range_.type_ == RangeType.Logarithmic:
ax1.set_xscale('log')
ax2.set_xscale('log')
fig.subplots_adjust(hspace=0.3)
ax1.set_ylabel('cost')
ax1.set_title('Surrogate objective function')
### Plot True Cost
if true_cost is not None:
# true cost is either the cost function, or pre-computed costs as an array
true_ys = true_cost(all_xs) if callable(true_cost) else true_cost
ax1.plot(all_xs, true_ys, '--', color='#2f2f2f', label='true cost', linewidth=1.0)
### Plot Samples
# plot samples projected onto the `param` axis
ax1.plot(concrete_xs, concrete_ys, 'bo', markersize=6, label='samples', zorder=5)
if len(hypothesised_xs) > 0:
# there are some hypothesised samples
ax1.plot(hypothesised_xs, hypothesised_ys, 'o', color='tomato',
markersize=6, label='hypothesised samples', zorder=5)
ax1.plot(best_concrete_x, best_concrete_y, '*', markersize=15,
color='deepskyblue', zorder=10, label='best sample')
### Plot Surrogate Function
def plot_sur_prediction_through(point, mu_label, sigma_label, mu_alpha, sigma_alpha):
# points with all but the chosen parameter fixed to match the given
# config, but the chosen parameter varies
perturbed = self._points_vary_one(point, param, ps_xs)
mus, sigmas = sur.predict(perturbed, std_dev=True)
mus = mus.flatten()
ax1.plot(all_xs, mus, 'm-', label=mu_label, alpha=mu_alpha, linewidth=1.0)
ax1.fill_between(all_xs, mus - n_sigma*sigmas, mus + n_sigma*sigmas, alpha=sigma_alpha,
color='mediumpurple', label=sigma_label)
#TODO: fit the view to the cost function, don't expand to fit in the uncertainty
if sims:
for mus in sim_mus:
ax1.plot(all_xs, mus, 'm:', linewidth=1.0)
plot_sur_prediction_through(chosen_point,
mu_label='surrogate cost', sigma_label='uncertainty ${}\\sigma$'.format(n_sigma),
mu_alpha=1, sigma_alpha=0.25)
# plot the predictions through each sample
def predictions_through_all_samples():
# avoid drawing predictions of the same place more than once, so
# avoid duplicate configurations which are identical to another
# except for the value of 'param', since the plot varies this
# parameter: the resulting plot will be the same in both cases.
param_index = self.point_space.param_indices[param]
# a copy of the current samples with the focused parameter zeroed
# start with s.next_x since that is a point which is guaranteed to
# have a prediction plotted through it
param_zeroed = np.vstack((chosen_point, xs))
param_zeroed[0, param_index] = 0  # TODO: shouldn't that be [:, param_index]?
param_zeroed = unique_rows_close(param_zeroed, close_tolerance=1e-3)
param_zeroed = param_zeroed[1:,:] # exclude chosen_point
if param_zeroed.shape[0] > 0:
# cap to make sure they don't become invisible
alpha = max(0.4/param_zeroed.shape[0], 0.015)
for row in param_zeroed:
plot_sur_prediction_through(make2D_row(row),
mu_label=None, sigma_label=None,
mu_alpha=alpha, sigma_alpha=alpha)
if sur_through_all:
predictions_through_all_samples()
### Plot Vertical Bars
bar_width = 1
ax1.axvline(x=chosen_x, linewidth=bar_width)
if random_fallback:
# in this case: chosen_x is the random choice
ax1.axvline(x=acq_chosen_x, color='y', linewidth=bar_width)
ax1.legend()
### Plot Acquisition Function
ax2.set_xlabel('parameter {}'.format(param))
ax2.set_ylabel(self.strategy.get_name(self.maximise_cost))
ax2.set_title('acquisition function')
# can be useful for observing the gradients of acquisition functions
# with very thin spikes.
#ax2.set_yscale('log')
if sims:
for s_ac in sim_ac:
ax2.plot(all_xs, s_ac, ':', color='g', linewidth=1.0)
ax2.plot(all_xs, ac, '-', color='g', linewidth=1.0, label='acquisition function')
ax2.fill_between(all_xs, np.zeros_like(all_xs), ac.flatten(), alpha=0.3, color='palegreen')
if random_fallback:
ax2.axvline(x=random_chosen_x, label='next sample', linewidth=bar_width)
label='$\\mathrm{{argmax}}\\; {}$'.format(
self.strategy.get_name(self.maximise_cost))
ax2.axvline(x=acq_chosen_x, color='y', label=label, linewidth=bar_width)
else:
ax2.axvline(x=acq_chosen_x, linewidth=bar_width)
ax2.plot(acq_chosen_x, acq_chosen_ac, 'b^', markersize=7,
zorder=10, label='next sample')
ax2.legend()
return fig
def plot_step_2D(self, x_param, y_param, step, true_cost=None,
plot_through='next', force_view_linear=False, mu_norm=None):
'''
x_param: the name of the parameter to place along the x axis
y_param: the name of the parameter to place along the y axis
step: the number of the step to plot
true_cost: a function (that takes x and y arguments) or meshgrid
containing the true cost values
plot_through: unlike the 1D step plotting, in 2D the heatmaps cannot be
easily overlaid to get a better understanding of the whole space.
Instead, a configuration can be provided to vary x_param and y_param
but leave the others constant to produce the graphs.
Pass 'next' to signify the next sample (the one chosen by the
current step) or 'best' to signify the current best sample as-of the
current step. a configuration dict can also be passed
force_view_linear: force the images to be displayed with linear axes
even if the parameters are logarithmic
mu_norm: a BayesianOptimisationOptimiserPlotting.Norm object
Useful to warp the colormap so that more of the available colors are
used on the range of interesting data.
'''
assert step in self.step_log, 'step not recorded in the log'
x_range = self.point_space.ranges[x_param]
y_range = self.point_space.ranges[y_param]
assert all(type_ in (RangeType.Linear, RangeType.Logarithmic)
for type_ in (x_range.type_, y_range.type_))
x_is_log = x_range.type_ == RangeType.Logarithmic
y_is_log = y_range.type_ == RangeType.Logarithmic
s = self.step_log[step]
num_suggestions = len(s.suggestions)
all_xs, all_ys = x_range.values, y_range.values
# cases for different types of steps. Only certain cases are accounted
# for, because only these cases should happen under normal circumstances.
if not (num_suggestions == 1 or num_suggestions == 2):
raise ValueError(num_suggestions)
# extract and process the data from the first suggestion
sg1 = s.suggestions[0]
if isinstance(sg1, Step.MaxAcqSuggestion):
# combination of the concrete samples (sx, sy) and the hypothesised samples
# (hx, hy) if there are any
xs = np.vstack((s.sx, s.hx))
ys = np.vstack((s.sy, sg1.hy))
# restore from just the hyperparameters to a surrogate model which can be queried
sur = self.Surrogate(self)
sur.fit(xs, ys, hyper_params=sg1.sur)
acq_fun = self._get_acq_fun(sur, ys)
sims = False # no simulations for this strategy
# the surrogate model is trained in point space
ps_xs = self.point_space.param_to_point_space(all_xs, x_param)
ps_ys = self.point_space.param_to_point_space(all_ys, y_param)
# passed as 'first index', 'second index'
ps_X, ps_Y = np.meshgrid(ps_xs, ps_ys)
grid_size = (len(all_ys), len(all_xs))
assert grid_size == ps_X.shape == ps_Y.shape
# all combinations of x and y values, each point as a row
#TODO: would hstack work instead of transposing?
ps_points = np.vstack((ps_X.ravel(), ps_Y.ravel())).T # ravel squashes to 1D
# the values for the chosen parameter for the concrete and
# hypothesised samples in config space.
concrete_xs = self.point_space.param_to_config_space(s.sx, x_param).flatten()
concrete_ys = self.point_space.param_to_config_space(s.sx, y_param).flatten()
concrete_zs = s.sy
hypothesised_xs = self.point_space.param_to_config_space(s.hx, x_param).flatten()
hypothesised_ys = self.point_space.param_to_config_space(s.hx, y_param).flatten()
hypothesised_zs = sg1.hy
# the current best concrete sample
best_i = np.argmax(concrete_zs) if self.maximise_cost else np.argmin(concrete_zs)
best_concrete_x = concrete_xs[best_i]
best_concrete_y = concrete_ys[best_i]
best_concrete_z = concrete_zs[best_i]
# next point chosen by the acquisition function to be evaluated
acq_chosen_point = sg1.x
acq_chosen_x = self.point_space.param_to_config_space(acq_chosen_point, x_param)
acq_chosen_y = self.point_space.param_to_config_space(acq_chosen_point, y_param)
acq_chosen_ac = sg1.ac
chosen_point = acq_chosen_point
chosen_x = acq_chosen_x
chosen_y = acq_chosen_y
elif isinstance(sg1, Step.MC_MaxAcqSuggestion):
# combination of the concrete samples (sx, sy) and the hypothesised samples
# (hx, hy) if there are any
xs = np.vstack((s.sx, s.hx))
# restore from just the hyperparameters to a surrogate model which can be queried
sur = self.Surrogate(self)
sur.fit(s.sx, s.sy, hyper_params=sg1.sur) # only concrete samples
sims = True # simulations used in this strategy
sim_surs = []
sim_ac_funs = [] # the acquisition functions for each simulation
for hy, sim_params in sg1.simulations:
ys = np.vstack((s.sy, hy))
sim_params = sim_params if sim_params is not None else sg1.sur
# fit the surrogate model to the points of this simulation
sim_sur = self.Surrogate(self)
sim_sur.fit(xs, ys, hyper_params=sim_params)
acq = self._get_acq_fun(sim_sur, ys) # partially apply
sim_ac_funs.append(acq)
sim_surs.append(sim_sur)
# average acquisition across every simulation
acq_fun = lambda xs: 1.0/len(sg1.simulations) * np.sum(acq(xs) for acq in sim_ac_funs)
# the surrogate model is trained in point space
ps_xs = self.point_space.param_to_point_space(all_xs, x_param)
ps_ys = self.point_space.param_to_point_space(all_ys, y_param)
# passed as 'first index', 'second index'
ps_X, ps_Y = np.meshgrid(ps_xs, ps_ys)
grid_size = (len(all_ys), len(all_xs))
assert grid_size == ps_X.shape == ps_Y.shape
# all combinations of x and y values, each point as a row
#TODO: would hstack work instead of transposing?
ps_points = np.vstack((ps_X.ravel(), ps_Y.ravel())).T # ravel squashes to 1D
# the values for the chosen parameter for the concrete and
# hypothesised samples in config space.
concrete_xs = self.point_space.param_to_config_space(s.sx, x_param).flatten()
concrete_ys = self.point_space.param_to_config_space(s.sx, y_param).flatten()
concrete_zs = s.sy
hypothesised_xs = self.point_space.param_to_config_space(s.hx, x_param).flatten()
hypothesised_ys = self.point_space.param_to_config_space(s.hx, y_param).flatten()
hypothesised_xs = np.vstack([make2D(hypothesised_xs)] * len(sg1.simulations))
hypothesised_ys = np.vstack([make2D(hypothesised_ys)] * len(sg1.simulations))
hypothesised_zs = np.vstack([hy for hy, sim_sur in sg1.simulations])
# the current best concrete sample
best_i = np.argmax(concrete_zs) if self.maximise_cost else np.argmin(concrete_zs)
best_concrete_x = concrete_xs[best_i]
best_concrete_y = concrete_ys[best_i]
best_concrete_z = concrete_zs[best_i]
# next point chosen by the acquisition function to be evaluated
acq_chosen_point = sg1.x
acq_chosen_x = self.point_space.param_to_config_space(acq_chosen_point, x_param)
acq_chosen_y = self.point_space.param_to_config_space(acq_chosen_point, y_param)
acq_chosen_ac = sg1.ac
chosen_point = acq_chosen_point
chosen_x = acq_chosen_x
chosen_y = acq_chosen_y
else:
raise NotImplementedError()
# extract and process the data from the second suggestion if there is one
if num_suggestions == 2:
sg2 = s.suggestions[1]
if isinstance(sg2, Step.RandomSuggestion):
random_fallback = True
random_chosen_point = sg2.x
random_chosen_x = self.point_space.param_to_config_space(random_chosen_point, x_param)
random_chosen_y = self.point_space.param_to_config_space(random_chosen_point, y_param)
chosen_point = random_chosen_point
chosen_x = random_chosen_x
chosen_y = random_chosen_y
else:
raise NotImplementedError()
else:
random_fallback = False
# determine the point to plot through
if isinstance(plot_through, dict):
plot_through = self.config_to_point(plot_through)
else:
plot_through = plot_through.lower()
if plot_through == 'next':
plot_through = chosen_point
elif plot_through == 'best':
best_i = np.argmax(concrete_zs) if self.maximise_cost else np.argmin(concrete_zs)  # best by cost, not by the y-parameter value
plot_through = make2D_row(s.sx[best_i])
else:
raise ValueError(plot_through)
# points with all but the chosen parameter fixed to match the given
# config, but the focused parameters vary
ps_perturbed_points = self._points_vary_one(plot_through, x_param, ps_points[:,0])
y_index = self.point_space.param_indices[y_param]
ps_perturbed_points[:,y_index] = ps_points[:,1]
if acq_fun:
mus, sigmas = sur.predict(ps_perturbed_points, std_dev=True)
mus = mus.reshape(*grid_size)
sigmas = sigmas.reshape(*grid_size)
ac = acq_fun(ps_perturbed_points)
ac = ac.reshape(*grid_size)
fig = plt.figure(figsize=(16, 16)) # inches
grid = gridspec.GridSpec(nrows=2, ncols=2)
# layout:
# ax1 ax2
# ax3 ax4
ax1 = fig.add_subplot(grid[0])
ax3, ax4 = fig.add_subplot(grid[2]), fig.add_subplot(grid[3])
ax2 = fig.add_subplot(grid[1]) if true_cost is not None else None
axes = (ax1, ax2, ax3, ax4) if true_cost is not None else (ax1, ax3, ax4)
for ax in axes:
ax.set_xlim(x_range.bounds)
if x_is_log and not force_view_linear:
ax.set_xscale('log')
ax.set_ylim(y_range.bounds)
if y_is_log and not force_view_linear:
ax.set_yscale('log')
ax.grid(False)
# need to specify rect so that the suptitle isn't cut off
fig.tight_layout(h_pad=3, w_pad=8, rect=[0, 0, 1, 0.96]) # [left, bottom, right, top] 0-1
title = 'Bayesian Optimisation step {}'.format(step - self.strategy.pre_phase_steps)
if random_fallback:
title += ' (Random Fallback)'
fig.suptitle(title, fontsize=20)
def plot_heatmap(ax, data, colorbar, cmap, norm=None):
# pcolormesh is better than imshow because: no need to fiddle around
# with extents and aspect ratios because the x and y values can be
# fed in and so just works. This also prevents the problem of the
# origin being in the wrong place. It is compatible with log scaled
# axes unlike imshow. There is no interpolation by default unlike
# imshow.
im = ax.pcolormesh(all_xs, all_ys, data, cmap=cmap, norm=norm)
if colorbar:
levels = norm.levels() if norm is not None else None
c = fig.colorbar(im, ax=ax, pad=0.01, fraction=0.051, boundaries=levels)
c.set_label('cost')
ax.set_xlabel('parameter {}'.format(x_param))
ax.set_ylabel('parameter {}'.format(y_param))
ax.plot(best_concrete_x, best_concrete_y, '*', markersize=15,
color='deepskyblue', zorder=10,
markeredgecolor='black', markeredgewidth=1.0, label='best sample')
ax.plot(concrete_xs, concrete_ys, 'ro', markersize=4,
linestyle='None', label='samples')
if len(hypothesised_xs) > 0:
ax.plot(hypothesised_xs, hypothesised_ys, 'o', color='#dcdcdc',
linestyle='None', markersize=4, label='hypothesised samples')
ax.plot(chosen_x, chosen_y, marker='d', color='orangered',
markeredgecolor='black', markeredgewidth=1.0, markersize=10,
linestyle='None', label='next sample')
title_size = 16
cmap = 'viridis'
# reverse the color map if minimising
cmap_match_direction = cmap if self.maximise_cost else cmap + '_r' # reversed
ax1.set_title('Surrogate $\\mu$', fontsize=title_size)
im = plot_heatmap(ax1, mus, colorbar=True, cmap=cmap_match_direction, norm=mu_norm)
ax3.set_title('Surrogate $\\sigma$', fontsize=title_size)
plot_heatmap(ax3, sigmas, colorbar=True, cmap=cmap)
if true_cost is not None:
ax2.set_title('True Cost', fontsize=title_size)
plot_heatmap(ax2, true_cost, colorbar=True, cmap=cmap_match_direction)
ax4.set_title('Acquisition Function', fontsize=title_size)
plot_heatmap(ax4, ac, colorbar=True, cmap=cmap)
if random_fallback:
label='$\\mathrm{{argmax}}\\; {}$'.format(
self.strategy.get_name(self.maximise_cost))
ax4.axvline(x=acq_chosen_x, color='y')
ax4.axhline(y=acq_chosen_y, color='y', label=label)
if true_cost is None:
ax4.legend(bbox_to_anchor=(0, 1.01), loc='lower left', borderaxespad=0.0)
else:
legend = ax2.legend(frameon=True, fancybox=True, loc='lower left')
legend.get_frame().set_facecolor('#ffffff')
legend.get_frame().set_alpha(0.5)
return fig
def num_randomly_chosen(self):
count = 0
for s in self.samples:
is_pre_sample = s.job_ID <= self.strategy.pre_phase_steps
is_random = (s.job_ID in self.step_log and
isinstance(self.step_log[s.job_ID].suggestions[-1], Step.RandomSuggestion))
if is_pre_sample or is_random:
count += 1
return count
def plot_cost_over_time(self, plot_each=True, plot_best=True,
true_best=None, plot_random=True):
'''
plot a line graph showing the progress that the optimiser makes towards
the optimum as the number of samples increases.
plot_each: plot the cost of each sample
plot_best: plot the running-best cost
true_best: if available: plot a horizontal line for the best possible cost
plot_random: whether to plot markers over the samples which were chosen randomly
'''
fig = OptimiserPlotting.plot_cost_over_time(self, plot_each, plot_best, true_best)
ax = fig.axes[0]
if plot_random:
random_sample_nums = []
random_sample_costs = []
for i, s in enumerate(self.samples):
is_pre_sample = s.job_ID <= self.strategy.pre_phase_steps
is_random = s.job_ID in self.step_log and self.step_log[s.job_ID].chosen_at_random()
if is_pre_sample or is_random:
random_sample_nums.append(i+1)
random_sample_costs.append(s.cost)
ax.plot(random_sample_nums, random_sample_costs, 'ro', markersize=5, label='randomly chosen')
ax.margins(0.0, 0.18)
ax.legend()
def sample_num_to_bayes_step(s_num):
i = int(s_num)-1
if i >= 0 and i < len(self.samples):
s = self.samples[i]
if s.job_ID in self.step_log:
return s.job_ID - self.strategy.pre_phase_steps
else:
return ''
else:
return ''
labels = [sample_num_to_bayes_step(s_num) for s_num in ax.get_xticks()]
ax2 = ax.twiny() # 'twin y'
ax2.grid(False)
ax2.set_xlim(ax.get_xlim())
ax2.xaxis.set_major_locator(ax.xaxis.get_major_locator())
# convert the labels marked on ax into new labels for the top
ax2.set_xticklabels(labels)
ax2.set_xlabel('Bayesian Step')
# raise the title to get out of the way of ax2
ax.title.set_position([0.5, 1.08])
return fig
|
gpl-3.0
|
MarcoFiorucci/dense_graph_reducer
|
misc/display_segmentations.py
|
1
|
1312
|
import numpy as np
import os
import matplotlib.pyplot
img_dir_path = "..//test_data//images"
img_res_path = "..//test_data//results//resize_factor_0_25"
alon_epsilons = np.arange(0.5, 0.8, 0.05)
FK_epsilons = np.arange(0.2, 0.5, 0.05)
if __name__ == "__main__":
one_stage = False
two_stage = True
if one_stage:
for img_path in sorted(os.listdir(os.path.normpath(img_dir_path))):
img_name = os.path.splitext(img_path)[0]
ar = np.load(os.path.normpath(img_res_path + "//one_stage" + "//" + img_name + "//" + img_name + ".npy"))
matplotlib.pyplot.matshow(ar)
matplotlib.pyplot.show()
if two_stage:
for img_path in sorted(os.listdir(os.path.normpath(img_dir_path))):
for eps in alon_epsilons:
img_name = os.path.splitext(img_path)[0]
if os.path.exists(os.path.normpath(img_res_path + "//two_stage" + "//alon//" + img_name + "//" + img_name + "_eps_" + str.replace(format(eps, '.2f'), ".", "_") + ".npy")):
ar = np.load(os.path.normpath(img_res_path + "//two_stage" + "//alon//" + img_name + "//" + img_name + "_eps_" + str.replace(format(eps, '.2f'), ".", "_") + ".npy"))
matplotlib.pyplot.matshow(ar)
matplotlib.pyplot.show()
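# --- Editor's sketch (not part of the original script) ---
# The "//"-joined string concatenation above can also be expressed with
# os.path.join. `stage`, `method` and `eps` below are hypothetical parameter
# names used only for illustration.
def _result_path(img_name, stage="two_stage", method="alon", eps=None):
    fname = img_name if eps is None else \
        img_name + "_eps_" + format(eps, '.2f').replace(".", "_")
    parts = [img_res_path, stage] + ([method] if method else []) + [img_name, fname + ".npy"]
    return os.path.normpath(os.path.join(*parts))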
|
apache-2.0
|
drdavis/rootplotlib
|
unit_tests/th2d_toy.py
|
2
|
1493
|
import ROOT
from ROOT import TRandom3
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
rand = TRandom3(0)
#Make TCanvas+TH2D
#c1 = ROOT.TCanvas()
#c1.cd()
th2d = ROOT.TH2D("th",";;",15,0,5,10,0,2)
#Fill with TRandom data
for i in xrange(100000):
th2d.Fill(rand.Gaus(.5,0.1),
rand.Gaus(.5,0.1))
#Draw TH2D
#th2d.Draw("COLZ")
#c1.Update()
# numpy reads the raw TArray buffer that the TH2D is sitting on; GetSize() includes +2 on both nBins (under/overflow)
data = np.frombuffer(th2d.GetArray(),count=th2d.GetSize())
#split the data on Y
data = np.array(np.split(data,th2d.GetNbinsY()+2))
#remove overflow/underflow
data = data[1:-1,1:-1]
#draw in pyplot
fig,ax = plt.subplots(figsize=(10,6))
#matshow mimics output of TH2D.Draw("COLZ"), put the origin at the bottom like ROOT
cb = ax.matshow(data,origin='lower')
#Move the xticks down there
ax.xaxis.set_ticks_position('bottom')
#Set the XTicks to be the same as default on TCanvas
nticks = float(10.0)
nbinsx = th2d.GetNbinsX()
print nbinsx
nbinsy = th2d.GetNbinsY()
ax.set_xticks(np.arange(0,nbinsx,1))
ax.set_yticks(np.arange(0,nbinsy,1))
#Set the labels to match whats on TCanvas
print np.arange(0,nbinsx,nticks)
print np.around(np.linspace(0,th2d.GetXaxis().GetXmax(),nbinsx),2)
ax.set_xticklabels(np.around(np.linspace(0,th2d.GetXaxis().GetXmax(),nbinsx),2))
ax.set_yticklabels(np.around(np.linspace(0,th2d.GetYaxis().GetXmax(),nbinsy),2)) #ROOT doesn't have GetYmax lol
#Z axis like root
plt.colorbar(cb)
#Draw
plt.show()
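# --- Editor's sketch (not part of the original test) ---
# The buffer-to-array steps above, wrapped into one helper. Assumes `hist` is a
# filled ROOT.TH2D storing doubles, as in this test; the +2 per axis accounts
# for the under/overflow bins, which are then stripped off.
def th2d_to_numpy(hist):
    arr = np.frombuffer(hist.GetArray(), count=hist.GetSize())
    arr = np.array(np.split(arr, hist.GetNbinsY() + 2))
    return arr[1:-1, 1:-1]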
|
mit
|
qifeigit/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
130
|
6059
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)  # pass the loop's variant
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
|
bsd-3-clause
|
alan-unravel/bokeh
|
bokeh/compat/mplexporter/exporter.py
|
32
|
12403
|
"""
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
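# Editor's note (illustrative only): a typical call from the drawing methods
# below looks like
#     code, xy = self.process_transform(line.get_transform(), ax,
#                                       line.get_xydata())
# where `code` is one of "data", "axes", "figure" or "display" and `xy` is the
# data mapped into that coordinate system.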
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
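# Editor's illustrative sketch (not part of mplexporter): typical driver code.
# `MyRenderer` is a hypothetical subclass implementing the Renderer interface
# referred to in the Exporter docstring above.
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3], [4, 5, 6])
#     Exporter(MyRenderer(), close_mpl=False).run(fig)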
|
bsd-3-clause
|
Troesler95/csit463-DIP
|
classify_cnn.py
|
1
|
1070
|
import cv2
import numpy as np
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers.core import Dense
def load_model():
model = Sequential()
model.add(Dense(256, input_dim=16384, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation="relu"))
model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
model.load_weights("64-soft-50-CNN18.hdf5")
return model
def get_features(potentials):
hog = cv2.HOGDescriptor()
pots = np.array([np.array(hog.computeGradient(
potential)).flatten() for potential in potentials])
print(pots[0].shape)
return pots
def classify_DNN(pot_signs):
model = load_model()
print(pot_signs[0].shape)
features = get_features(pot_signs)
print(features[0].shape)
predictions = model.predict_classes(features, batch_size=len(pot_signs))
return predictions
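# Editor's note (illustrative only): `pot_signs` is expected to be a list of
# image patches whose HOG features flatten to 16384 values, matching the
# input_dim of the first Dense layer above. A hypothetical call:
#     predictions = classify_DNN([cv2.imread(p) for p in patch_paths])
# where `patch_paths` is an invented list of file paths for illustration.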
|
gpl-3.0
|
jve2kor/machine-learning-nanodegree
|
projects/finding_donors/visuals.py
|
1
|
5534
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import f1_score, accuracy_score
def distribution(data, transformed = False):
"""
Visualization code for displaying skewed distributions of features
"""
# Create figure
fig = pl.figure(figsize = (11,5));
# Skewed feature plotting
for i, feature in enumerate(['capital-gain','capital-loss']):
ax = fig.add_subplot(1, 2, i+1)
ax.hist(data[feature], bins = 25, color = '#00A0A0')
ax.set_title("'%s' Feature Distribution"%(feature), fontsize = 14)
ax.set_xlabel("Value")
ax.set_ylabel("Number of Records")
ax.set_ylim((0, 2000))
ax.set_yticks([0, 500, 1000, 1500, 2000])
ax.set_yticklabels([0, 500, 1000, 1500, ">2000"])
# Plot aesthetics
if transformed:
fig.suptitle("Log-transformed Distributions of Continuous Census Data Features", \
fontsize = 16, y = 1.03)
else:
fig.suptitle("Skewed Distributions of Continuous Census Data Features", \
fontsize = 16, y = 1.03)
fig.tight_layout()
fig.show()
def evaluate(results, accuracy, f1):
"""
Visualization code to display results of various learners.
inputs:
- results: a dictionary mapping each learner's name to a list of
  dictionaries of the statistic results from 'train_predict()'
- accuracy: The score for the naive predictor
- f1: The score for the naive predictor
"""
# Create figure
fig, ax = pl.subplots(2, 3, figsize = (11,7))
# Constants
bar_width = 0.3
colors = ['#A00000','#00A0A0','#00A000']
# Super loop to plot four panels of data
for k, learner in enumerate(results.keys()):
for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
for i in np.arange(3):
# Creative plot code
# floor division keeps the subplot index an integer (works under Python 2 and 3)
ax[j//3, j%3].bar(i+k*bar_width, results[learner][i][metric], width = bar_width, color = colors[k])
ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45])
ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
ax[j//3, j%3].set_xlabel("Training Set Size")
ax[j//3, j%3].set_xlim((-0.1, 3.0))
# Add unique y-labels
ax[0, 0].set_ylabel("Time (in seconds)")
ax[0, 1].set_ylabel("Accuracy Score")
ax[0, 2].set_ylabel("F-score")
ax[1, 0].set_ylabel("Time (in seconds)")
ax[1, 1].set_ylabel("Accuracy Score")
ax[1, 2].set_ylabel("F-score")
# Add titles
ax[0, 0].set_title("Model Training")
ax[0, 1].set_title("Accuracy Score on Training Subset")
ax[0, 2].set_title("F-score on Training Subset")
ax[1, 0].set_title("Model Predicting")
ax[1, 1].set_title("Accuracy Score on Testing Set")
ax[1, 2].set_title("F-score on Testing Set")
# Add horizontal lines for naive predictors
ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
# Set y-limits for score panels
ax[0, 1].set_ylim((0, 1))
ax[0, 2].set_ylim((0, 1))
ax[1, 1].set_ylim((0, 1))
ax[1, 2].set_ylim((0, 1))
# Create patches for the legend
patches = []
for i, learner in enumerate(results.keys()):
patches.append(mpatches.Patch(color = colors[i], label = learner))
pl.legend(handles = patches, bbox_to_anchor = (-.80, 2.53), \
loc = 'upper center', borderaxespad = 0., ncol = 3, fontsize = 'x-large')
# Aesthetics
pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10)
pl.tight_layout()
pl.show()
def feature_plot(importances, X_train, y_train):
# Display the five most important features
indices = np.argsort(importances)[::-1]
columns = X_train.columns.values[indices[:5]]
values = importances[indices][:5]
# Create the plot
fig = pl.figure(figsize = (9,5))
pl.title("Normalized Weights for First Five Most Predictive Features", fontsize = 16)
pl.bar(np.arange(5), values, width = 0.6, align="center", color = '#00A000', \
label = "Feature Weight")
pl.bar(np.arange(5) - 0.3, np.cumsum(values), width = 0.2, align = "center", color = '#00A0A0', \
label = "Cumulative Feature Weight")
pl.xticks(np.arange(5), columns)
pl.xlim((-0.5, 4.5))
pl.ylabel("Weight", fontsize = 12)
pl.xlabel("Feature", fontsize = 12)
pl.legend(loc = 'upper center')
pl.tight_layout()
pl.show()
|
gpl-3.0
|
imaculate/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
244
|
6011
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed-up of LSHForest queries relative to brute-force
exact nearest neighbor search is measured for the same settings.
In general, the speed-up increases as the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
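# Illustrative sketch (hypothetical helper, never called by the benchmark): the
# accuracy reported by calc_accuracy above is the mean precision@k over the
# query set.
def _mean_precision_at_k_sketch(approx_neighbors, exact_neighbors):
    return np.mean([np.in1d(approx, exact).mean()
                    for approx, exact in zip(approx_neighbors, exact_neighbors)])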
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
bsd-3-clause
|
abimannans/scikit-learn
|
examples/plot_isotonic_regression.py
|
303
|
1767
|
"""
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
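# Illustrative micro-example (hypothetical helper, never called): for
# y = [1, 3, 2] the violating pair (3, 2) is pooled to its mean, so the
# non-decreasing least-squares fit is [1, 2.5, 2.5].
def _toy_isotonic_example():
    return IsotonicRegression().fit_transform([0, 1, 2], [1, 3, 2])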
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
|
bsd-3-clause
|
rs2/pandas
|
pandas/tests/arithmetic/test_array_ops.py
|
4
|
1051
|
import operator
import numpy as np
import pytest
import pandas._testing as tm
from pandas.core.ops.array_ops import comparison_op, na_logical_op
def test_na_logical_op_2d():
left = np.arange(8).reshape(4, 2)
right = left.astype(object)
right[0, 0] = np.nan
# Check that we fall back to the vec_binop branch
with pytest.raises(TypeError, match="unsupported operand type"):
operator.or_(left, right)
result = na_logical_op(left, right, operator.or_)
expected = right
tm.assert_numpy_array_equal(result, expected)
def test_object_comparison_2d():
left = np.arange(9).reshape(3, 3).astype(object)
right = left.T
result = comparison_op(left, right, operator.eq)
expected = np.eye(3).astype(bool)
tm.assert_numpy_array_equal(result, expected)
# Ensure that cython doesn't raise on non-writeable arg, which
# we can get from np.broadcast_to
right.flags.writeable = False
result = comparison_op(left, right, operator.ne)
tm.assert_numpy_array_equal(result, ~expected)
|
bsd-3-clause
|
YihaoLu/statsmodels
|
statsmodels/tools/tests/test_data.py
|
36
|
1758
|
import pandas
import numpy as np
from statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames.tolist(), [0,2,3,4,5,6,7,8,9])
def test_structarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X)
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_recarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X.view(np.recarray))
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_dataframe():
X = np.random.random((10,5))
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(vals, df.values)
np.testing.assert_equal(rnames.tolist(), df.index.tolist())
np.testing.assert_equal(cnames, df.columns.tolist())
def test_patsy_577():
X = np.random.random((10, 2))
df = pandas.DataFrame(X, columns=["var1", "var2"])
from patsy import dmatrix
endog = dmatrix("var1 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, None))
exog = dmatrix("var2 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, exog))
|
bsd-3-clause
|
hayd/SimpleCV
|
SimpleCV/Shell/Shell.py
|
10
|
7838
|
#!/usr/bin/python
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SimpleCV
# a kinder, gentler machine vision python library
#-----------------------------------------------------------------------
# SimpleCV is an interface for Open Source machine
# vision libraries in Python.
# It provides a concise, readable interface for cameras,
# image manipulation, feature extraction, and format conversion.
# Our mission is to give casual users a comprehensive interface
# for basic machine vision functions and an
# elegant programming interface for advanced users.
#
# more info:
# http://www.simplecv.org
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#load system libraries
from subprocess import call
import platform
import webbrowser
import sys
from SimpleCV.__init__ import *
#Load simpleCV libraries
from SimpleCV.Shell.Tutorial import *
from SimpleCV.Shell.Example import *
try:
from SimpleCV import __version__ as SIMPLECV_VERSION
except ImportError:
SIMPLECV_VERSION = ''
#Command to clear the shell screen
def shellclear():
if platform.system() == "Windows":
return
call("clear")
#method to get magic_* methods working in bpython
def make_magic(method):
def wrapper(*args, **kwargs):
if not args:
return method('', '')
return method('', *args, **kwargs)
return wrapper
def plot(arg):
try:
import matplotlib.pyplot as plt
except ImportError:
logger.warning("Matplotlib is not installed and required")
return
print "args", arg
print "type", type(arg)
plt.plot(arg)
plt.show()
def hist(arg):
try:
import pylab
except ImportError:
logger.warning("pylab is not installed and required")
return
plot(pylab.hist(arg)[1])
def magic_clear(self, arg):
shellclear()
def magic_forums(self, arg):
webbrowser.open('http://help.simplecv.org/questions/')
def magic_walkthrough(self, arg):
webbrowser.open('http://examples.simplecv.org/en/latest/')
def magic_docs(self, arg):
webbrowser.open('http://www.simplecv.org/docs/')
banner = '+-----------------------------------------------------------+\n'
banner += ' SimpleCV '
banner += SIMPLECV_VERSION
banner += ' [interactive shell] - http://simplecv.org\n'
banner += '+-----------------------------------------------------------+\n'
banner += '\n'
banner += 'Commands: \n'
banner += '\t"exit()" or press "Ctrl+ D" to exit the shell\n'
banner += '\t"clear()" to clear the shell screen\n'
banner += '\t"tutorial()" to begin the SimpleCV interactive tutorial\n'
banner += '\t"example()" gives a list of examples you can run\n'
banner += '\t"forums()" will launch a web browser for the help forums\n'
banner += '\t"walkthrough()" will launch a web browser with a walkthrough\n'
banner += '\n'
banner += 'Usage:\n'
banner += '\tdot complete works to show library\n'
banner += '\tfor example: Image().save("/tmp/test.jpg") will dot complete\n'
banner += '\tjust by touching TAB after typing Image().\n'
banner += '\n'
banner += 'Documentation:\n'
banner += '\thelp(Image), ?Image, Image?, or Image()? all do the same\n'
banner += '\t"docs()" will launch webbrowser showing documentation'
banner += '\n'
exit_msg = '\n... [Exiting the SimpleCV interactive shell] ...\n'
def setup_ipython():
try:
import IPython
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
cfg.PromptManager.in_template = "SimpleCV:\\#> "
cfg.PromptManager.out_template = "SimpleCV:\\#: "
#~ cfg.InteractiveShellEmbed.prompt_in1 = "SimpleCV:\\#> "
#~ cfg.InteractiveShellEmbed.prompt_out="SimpleCV:\\#: "
scvShell = InteractiveShellEmbed(config=cfg, banner1=banner,
exit_msg=exit_msg)
scvShell.define_magic("tutorial", magic_tutorial)
scvShell.define_magic("clear", magic_clear)
scvShell.define_magic("example", magic_examples)
scvShell.define_magic("forums", magic_forums)
scvShell.define_magic("walkthrough", magic_walkthrough)
scvShell.define_magic("docs", magic_docs)
except ImportError:
try:
from IPython.Shell import IPShellEmbed
argsv = ['-pi1', 'SimpleCV:\\#>', '-pi2', ' .\\D.:', '-po',
'SimpleCV:\\#>', '-nosep']
scvShell = IPShellEmbed(argsv)
scvShell.set_banner(banner)
scvShell.set_exit_msg(exit_msg)
scvShell.IP.api.expose_magic("tutorial", magic_tutorial)
scvShell.IP.api.expose_magic("clear", magic_clear)
scvShell.IP.api.expose_magic("example", magic_examples)
scvShell.IP.api.expose_magic("forums", magic_forums)
scvShell.IP.api.expose_magic("walkthrough", magic_walkthrough)
scvShell.IP.api.expose_magic("docs", magic_docs)
except ImportError:
raise
return scvShell()
def setup_bpython():
import bpython
example = make_magic(magic_examples)
clear = make_magic(magic_clear)
docs = make_magic(magic_docs)
tutorial = make_magic(magic_tutorial)
walkthrough = make_magic(magic_walkthrough)
forums = make_magic(magic_forums)
temp = locals().copy()
temp.update(globals())
return bpython.embed(locals_=temp, banner=banner)
def setup_plain():
import code
return code.interact(banner=banner, local=globals())
def run_notebook(mainArgs):
    """Run the ipython notebook server"""
    if IPython.__version__.startswith('1.'):
        from IPython.html import notebookapp
        from IPython.html.services.kernels import kernelmanager
    else:
        from IPython.frontend.html.notebook import notebookapp
        from IPython.frontend.html.notebook import kernelmanager
code = ""
code += "from SimpleCV import *;"
code += "init_options_handler.enable_notebook();"
kernelmanager.MappingKernelManager.first_beat = 30.0
app = notebookapp.NotebookApp.instance()
mainArgs += [
'--port', '5050',
'--c', code,
]
app.initialize(mainArgs)
app.start()
sys.exit()
def self_update():
URL = "https://github.com/sightmachine/SimpleCV/zipball/master"
command = "pip install -U %s" % URL
if os.getuid() == 0:
command = "sudo " + command
returncode = call(command, shell=True)
sys.exit()
def run_shell(shell=None):
shells = ['setup_ipython', 'setup_bpython', 'setup_plain']
available_shells = [shell] if shell else shells
for shell in available_shells:
try:
return globals()[shell]()
except ImportError:
pass
raise ImportError
def main(*args):
log_level = logging.WARNING
interface = None
if len(sys.argv) > 1 and len(sys.argv[1]) > 1:
flag = sys.argv[1]
if flag == 'notebook':
run_notebook(sys.argv[1:])
sys.exit()
elif flag == 'update':
print "Updating SimpleCV....."
self_update()
if flag in ['--headless', 'headless']:
# set SDL to use the dummy NULL video driver,
# so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
elif flag in ['--nowarnings', 'nowarnings']:
log_level = logging.INFO
elif flag in ['--debug', 'debug']:
log_level = logging.DEBUG
if flag in ['--ipython', 'ipython']:
interface = 'setup_ipython'
elif flag in ['--bpython', 'bpython']:
interface = 'setup_bpython'
else:
interface = 'setup_plain'
init_logging(log_level)
shellclear()
scvShell = run_shell(interface)
|
bsd-3-clause
|
jshiv/turntable
|
test/lib/python2.7/site-packages/scipy/signal/spectral.py
|
9
|
13829
|
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy.lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
measured in V. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
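    # The periodogram is computed as a single welch() segment with zero overlap
    # spanning the whole signal: if nfft is shorter than the signal the data are
    # truncated to nfft samples here, and if it is longer welch() zero-pads the
    # segment up to nfft.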
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
warnings.warn('nperseg = %d, is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
# numpy 1.5.1 doesn't have result_type.
outdtype = (np.array([x[0]]) * np.array([1], 'f')).dtype.char.lower()
if win.dtype != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not detrend:
detrend_func = lambda seg: seg
elif not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
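    # The segment periodograms are averaged on the fly with a running mean:
    # after segment k, Pxx = (k*Pxx_prev + |X_k|**2) / (k + 1), i.e. the mean of
    # the first k+1 modified periodograms.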
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, outdtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, outdtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
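        # One-sided spectrum: double the interior (strictly positive) frequency
        # bins to account for the discarded negative-frequency half; the first
        # and last bins are scaled but not doubled.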
Pxx[..., 1:-1] *= 2*scale
Pxx[..., (0,-1)] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
|
mit
|
matthiasdiener/spack
|
var/spack/repos/builtin/packages/py-quast/package.py
|
5
|
2252
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyQuast(PythonPackage):
"""Quality Assessment Tool for Genome Assemblies"""
homepage = "http://cab.spbu.ru/software/quast"
url = "https://github.com/ablab/quast/archive/quast_4.6.1.tar.gz"
version('4.6.3', '16d77acb2e0f6436b58d9df7b732fb76')
version('4.6.1', '37ccd34e0040c17aa6f990353a92475c')
version('4.6.0', 'c04d62c50ec4d9caa9d7388950b8d144')
depends_on('[email protected]')
depends_on('[email protected]:')
depends_on('[email protected]:,3.3:')
depends_on('py-setuptools', type='build')
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('java', type=('build', 'run'))
depends_on('perl-time-hires', type=('build', 'run'))
depends_on('gnuplot', type=('build', 'run'))
depends_on('mummer', type=('build', 'run'))
depends_on('bedtools2', type=('build', 'run'))
depends_on('bwa', type=('build', 'run'))
depends_on('glimmer', type=('build', 'run'))
|
lgpl-2.1
|
evgchz/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
13
|
10412
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
"""
Test LinearRegression on a simple dataset.
"""
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_n_jobs():
"""
Test for the n_jobs parameter on the fit method and the constructor
"""
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf_fit = clf.fit(X, Y, 4)
assert_equal(clf_fit.n_jobs, clf.n_jobs)
assert_equal(clf.n_jobs, 1)
def test_fit_intercept():
"""
Test assertions on betas shape.
"""
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
"""Test output format of sparse_center_data, when input is csr"""
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
|
bsd-3-clause
|
jclee81/sktacc
|
sktps/admin.py
|
1
|
8430
|
#!/usr/bin/env python
import json
import threading
import time
from datetime import datetime
import pandas as pd
import redis
from flask import Flask
from flask_cors import CORS
from flask_restful import Api, Resource
import util
from train import TrainCenter
from util.config import config
from util.log import log
from util.singleton import SingletonMixin
app = Flask(__name__)
CORS(app)
api = Api(app)
LOOP_INTERVAL_SEC = 5
class LogCollector(SingletonMixin):
def __init__(self):
super(LogCollector, self).__init__()
info = config['pubsub']
self.host = info[0]
self.port = int(info[1])
self.r = redis.StrictRedis(host=self.host, port=self.port, db=0)
self.p = self.r.pubsub()
self.p.psubscribe('*')
def collect(self):
# TODO: get message from pub/sub server
while True:
raw_message = self.p.get_message()
if not raw_message:
break
# log.warn('raw_message: %s' % raw_message)
self._handle(raw_message)
def _handle(self, raw_message):
data = util.extract_json2(raw_message)
if data is None or 'key' not in data:
return
key = data['key']
# if key == 'START_ML_WORKER':
# worker_id = data['worker_id']
# Center().connect_ml_worker(worker_id)
# elif key == 'START_ML_TRAIN':
# worker_id = data['worker_id']
# code_name = data['code_name']
# train_id = data['train_id']
# Center().start_train(worker_id, code_name, train_id)
# elif key == 'FINISH_ML_TRAIN':
# worker_id = data['worker_id']
# code_name = data['code_name']
# train_id = data['train_id']
# Center().finish_train(worker_id, code_name, train_id)
# elif key == 'REGISTER_TRAIN':
# Center().register_train(data)
if key == 'UPDATE_PS':
Center().update_ps(data)
elif key == 'UPDATE_PS_DETAIL':
Center().update_ps_detail(data)
elif key == 'MEASUREMENT':
Center().update_measurement(data)
elif key == 'TRAIN_NOW':
TrainCenter().train_now(data)
elif key == 'set_variable':
pass
elif key == 'average':
pass
else:
log.error('IMPME: %s' % key)
def start_sub_log_and_command():
log.warn('START THREAD: admin / subscribe log and command')
while True:
LogCollector().collect()
# time.sleep(0.001)
Center().loop_count += 1
time.sleep(LOOP_INTERVAL_SEC)
def start_train_center():
log.warn('START THREAD: admin / train-center')
while True:
TrainCenter().update()
time.sleep(LOOP_INTERVAL_SEC)
class MeasureContainer(object):
def __init__(self):
self.train_ids = set([])
self.group_ids = set([])
self.ps_dict = {} # group_id: { worker_id: data }
self.controller_dict = {} # group_id: data
self.df = pd.DataFrame(
columns=[
'train_id', 'group_id', 'worker_id', 'parallel_count',
'load_rtt', 'save_rtt', 'controller_rtt',
'data_size', 'success', 'cal',
],
dtype='float')
def _to_list(self, data):
load_end = int(data['num_01_after_load_variables'])
load_start = int(data['num_01_before_load_variables'])
load_rtt = load_end - load_start
save_end = int(data['num_02_after_save_variables'])
save_start = int(data['num_02_before_save_variables'])
save_rtt = save_end - save_start
controller_rtt = int(data['num_05_after_pub_on_controller']) - int(
data['num_03_after_get_on_controller'])
cal = int(data['cal'])
success = 1
return [
data['train_id'],
data['group_id'],
data['worker_id'],
data['parallel_count'],
load_rtt,
save_rtt,
controller_rtt,
data['data_size'],
success,
cal,
]
def update(self, data):
node_type = data['node_type']
group_id = data['group_id']
if node_type == 'ps':
self._update_ps(group_id, data)
else:
self._update_controller(group_id, data)
def get_stat_of_train(self):
d = json.loads(self.get_train_stat_json())
d2 = json.loads(
self.df.groupby(['train_id'])['group_id'].count().to_json(
orient='index'))
for k, v in d.iteritems():
v['count'] = d2[k]
return d
def get_train_stat_json(self):
df = self.df
return df.groupby(['train_id']).mean().to_json(orient='index')
def get_group_stat_json(self):
df = self.df
return df.groupby(['group_id']).mean().to_json(orient='index')
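    # ps and controller measurements for a group arrive independently: a ps
    # record is buffered in ps_dict until the matching controller record has
    # been seen (controller records are kept in controller_dict), and a
    # DataFrame row is appended only once the two halves are merged.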
def _update_ps(self, group_id, raw):
worker_id = raw['worker_id']
raw['merged'] = False
if group_id in self.controller_dict:
controller_data = self.controller_dict[group_id]
merged_data = self._merge(raw, controller_data)
self._append(merged_data)
else:
d = self.ps_dict
if group_id not in d:
d[group_id] = {}
group = d[group_id]
group[worker_id] = raw
def _merge(self, ps_data, controller_data):
return util.merge_two_dicts(ps_data, controller_data)
def _append(self, merged_data):
l = self._to_list(merged_data)
df = self.df
df.loc[len(df)] = l
def _update_controller(self, group_id, data):
self.controller_dict[group_id] = data
psd = self.ps_dict
if group_id in psd:
group_dict = psd[group_id]
for ps in group_dict.itervalues():
merged_data = self._merge(ps, data)
self._append(merged_data)
del psd[group_id]
class Center(SingletonMixin):
def __init__(self):
super(Center, self).__init__()
self.loop_count = 0
self.ml_worker = {}
self.ps = {}
self.ps_detail = []
self.measure_container = MeasureContainer()
# def start_train(self, worker_id, code_name, train_id):
# msg = 'Start (%s:%s)' % (code_name, train_id)
# w = self.ml_worker[worker_id]
# w['description'] = msg
# def finish_train(self, worker_id, code_name, train_id):
# msg = 'Finish (%s:%s)' % (code_name, train_id)
# w = self.ml_worker[worker_id]
# w['description'] = msg
# def connect_ml_worker(self, worker_id):
# self.ml_worker[worker_id] = {
# 'worker_id': worker_id,
# 'description': 'connected',
# }
def update_ps_detail(self, data):
group_id = data['group_id']
msg = data['msg']
worker_id = data['worker_id']
now = datetime.now()
now_str = now.strftime('%H:%M:%S.%f')
self.ps_detail.append({
'group_id': group_id,
'worker_id': worker_id,
'msg': msg,
'time': now_str})
def update_ps(self, data):
v = data['value']
group_id = v['group_id']
self.ps[group_id] = v
def update_measurement(self, data):
self.measure_container.update(data)
def get_data(self):
return {
'loop_count': self.loop_count,
'train': TrainCenter().get_info(),
'worker': [v for k, v in self.ml_worker.iteritems()],
'ps': [v for k, v in self.ps.iteritems()],
'ps_detail': self.ps_detail,
'stat_of_group': json.loads(
self.measure_container.get_group_stat_json()),
'stat_of_train': self.measure_container.get_stat_of_train(),
}
class DefaultRoute(Resource):
def get(self):
return Center().get_data()
api.add_resource(DefaultRoute, '/')
def run():
t1 = threading.Thread(target=start_sub_log_and_command)
t1.daemon = True
t1.start()
t2 = threading.Thread(target=start_train_center)
t2.daemon = True
t2.start()
admin_config = config['admin']
app.run(host='0.0.0.0', port=int(admin_config['port']), debug=False)
# app.run(port=int(admin_config['port']), debug=True)
if __name__ == '__main__':
run()
|
apache-2.0
|
lbdreyer/iris
|
lib/iris/tests/integration/plot/test_plot_2d_coords.py
|
3
|
2568
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test plots with two dimensional coordinates.
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import iris
from iris.analysis.cartography import unrotate_pole
from iris.cube import Cube
from iris.coords import AuxCoord
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_data
def simple_cube_w_2d_coords():
path = tests.get_data_path(("NetCDF", "ORCA2", "votemper.nc"))
cube = iris.load_cube(path)
return cube
@tests.skip_plot
@tests.skip_data
class Test(tests.GraphicsTest):
def test_2d_coord_bounds_platecarree(self):
# To avoid a problem with Cartopy smearing the data where the
# longitude wraps, we set the central_longitude
cube = simple_cube_w_2d_coords()[0, 0]
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
qplt.pcolormesh(cube)
ax.coastlines(resolution="110m", color="red")
self.check_graphic()
def test_2d_coord_bounds_northpolarstereo(self):
cube = simple_cube_w_2d_coords()[0, 0]
ax = plt.axes(projection=ccrs.NorthPolarStereo())
qplt.pcolormesh(cube)
ax.coastlines(resolution="110m", color="red")
self.check_graphic()
@tests.skip_plot
class Test2dContour(tests.GraphicsTest):
def test_2d_coords_contour(self):
ny, nx = 4, 6
x1 = np.linspace(-20, 70, nx)
y1 = np.linspace(10, 60, ny)
data = np.zeros((ny, nx))
data.flat[:] = np.arange(nx * ny) % 7
cube = Cube(data, long_name="Odd data")
x2, y2 = np.meshgrid(x1, y1)
true_lons, true_lats = unrotate_pole(x2, y2, -130.0, 77.0)
co_x = AuxCoord(true_lons, standard_name="longitude", units="degrees")
co_y = AuxCoord(true_lats, standard_name="latitude", units="degrees")
cube.add_aux_coord(co_y, (0, 1))
cube.add_aux_coord(co_x, (0, 1))
ax = plt.axes(projection=ccrs.PlateCarree())
qplt.contourf(cube)
ax.coastlines(resolution="110m", color="red")
ax.gridlines(draw_labels=True)
ax.set_extent((0, 180, 0, 90))
self.check_graphic()
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
theavey/ParaTemp
|
tests/test_energy_bin_analysis.py
|
1
|
3275
|
"""This contains a set of tests for paratemp.coordinate_analysis"""
########################################################################
# #
# This test was written by Thomas Heavey in 2018. #
# [email protected] [email protected] #
# #
# Copyright 2017-18 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
import pandas as pd
import pathlib
import pytest
from matplotlib.figure import Figure
from paratemp.tools import cd
def test_get_energies(pt_run_dir):
# Doesn't currently test:
# content of the outputs
from paratemp.energy_bin_analysis import get_energies
with cd(pt_run_dir):
mi_df = get_energies('PT-out')
assert len(mi_df.index.levels[0]) == 2
assert isinstance(mi_df, pd.DataFrame)
assert isinstance(mi_df.index, pd.MultiIndex)
@pytest.fixture
def energies_df(pt_run_dir):
from paratemp.energy_bin_analysis import get_energies
with cd(pt_run_dir):
mi_df = get_energies('PT-out')
return mi_df
def test_make_energy_component_plots(energies_df):
from paratemp.energy_bin_analysis import make_energy_component_plots
fig = make_energy_component_plots(energies_df, 'Pressure', display=True)
assert isinstance(fig, Figure)
fig = make_energy_component_plots(energies_df, 'Pressure', display=False)
assert fig is None
@pytest.fixture
def replica_temp_path(pt_run_dir: pathlib.PosixPath):
# Doesn't currently test:
# content of the outputs
# what happens if they already exist
from paratemp.energy_histo import make_indices
with cd(pt_run_dir):
make_indices('PT-out0.log')
return pt_run_dir / 'replica_temp.xvg'
class TestDeconvolveEnergies(object):
def test_function_runs(self, energies_df, replica_temp_path):
from paratemp.energy_bin_analysis import deconvolve_energies
df = deconvolve_energies(energies_df, index=str(replica_temp_path))
assert isinstance(df, pd.DataFrame)
|
apache-2.0
|
Traecp/MCA_GUI
|
build/scripts-2.7/McaGUI.py
|
2
|
73382
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.ndimage
from scipy import stats
from scipy.fftpack import fft, fftfreq, fftshift
import os, sys
import gc
from os import listdir
from os.path import isfile,join
import gtk
import matplotlib as mpl
import matplotlib.pyplot as plt
#mpl.use('GtkAgg')
from matplotlib.figure import Figure
#from matplotlib.axes import Subplot
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from matplotlib.cm import jet#, gist_rainbow # colormap
from matplotlib.widgets import Cursor
#from matplotlib.patches import Rectangle
from matplotlib import path
#import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import xrayutilities as xu
from lmfit import Parameters, minimize
import h5py as h5
from MCA_GUI import mca_spec as SP
__version__ = "1.1.7"
__date__ = "05/11/2014"
__author__ = "Thanh-Tra NGUYEN"
__email__ = "[email protected]"
#mpl.rcParams['font.size'] = 18.0
#mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.handletextpad'] = 0.5
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.subplot.bottom'] = 0.13
mpl.rcParams['figure.subplot.top'] = 0.93
mpl.rcParams['figure.subplot.left'] = 0.14
mpl.rcParams['figure.subplot.right'] = 0.915
mpl.rcParams['savefig.dpi'] = 300
def Fourier(X,vect):
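    # Returns the non-negative-frequency half of the magnitude spectrum of
    # `vect`, normalised to its maximum, together with the corresponding
    # frequency axis computed from the sample spacing X[1] - X[0].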
N = vect.size #number of data points
T = X[1] - X[0] #sample spacing
TF = fft(vect)
xf = fftfreq(N,T)
xf = fftshift(xf)
yplot = fftshift(TF)
yplot = np.abs(yplot)
yplot = yplot[N/2:]
xf = xf[N/2:]
return xf, yplot/yplot.max()
def flat_data(data,dynlow, dynhigh, log):
""" Returns data where maximum superior than 10^dynhigh will be replaced by 10^dynhigh, inferior than 10^dynlow will be replaced by 10^dynlow"""
if log:
mi = 10**dynlow
ma = 10**dynhigh
data=np.minimum(np.maximum(data,mi),ma)
data=np.log10(data)
else:
mi = dynlow
ma = dynhigh
data=np.minimum(np.maximum(data,mi),ma)
return data
def psdVoigt(parameters,x):
"""Define pseudovoigt function"""
y0 = parameters['y0'].value
xc = parameters['xc'].value
A = parameters['A'].value
w = parameters['w'].value
mu = parameters['mu'].value
y = y0 + A * ( mu * (2/np.pi) * (w / (4*(x-xc)**2 + w**2)) + (1 - mu) * (np.sqrt(4*np.log(2)) / (np.sqrt(np.pi) * w)) * np.exp(-(4*np.log(2)/w**2)*(x-xc)**2) )
return y
def objective(pars,y,x):
#we will minimize this function
err = y - psdVoigt(pars,x)
return err
def init(data_x,data_y,xc,arbitrary=False):
""" param = [y0, xc, A, w, mu]
    xc is meant to be the position the user points to on the image, from which the profiles are drawn"""
param = Parameters()
#idA=np.where(data_x - xc < 1e-4)[0]
if arbitrary:
A = data_y.max()
else:
idA=np.where(data_x==xc)[0][0]
A = data_y[idA]
y0 = 1.0
w = 0.5
mu = 0.5
param.add('y0', value=y0)
param.add('xc', value=xc)
param.add('A', value=A)
param.add('w', value=w)
param.add('mu', value=mu, min=0., max=1.)
return param
def fit(data_x,data_y,xc, arbitrary=False):
""" return: fitted data y, fitted parameters """
param_init = init(data_x,data_y,xc,arbitrary)
if data_x[0] > data_x[-1]:
data_x = data_x[::-1]
result = minimize(objective, param_init, args=(data_y,data_x))
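    # With the lmfit API this 2014-era script targets, minimize() updates
    # param_init in place, so the curve evaluated below uses the fitted
    # parameter values; with modern lmfit they would be read from
    # result.params instead.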
x = np.linspace(data_x.min(),data_x.max(),data_x.shape[0])
y = psdVoigt(param_init,x)
return param_init, y
class PopUpFringes(object):
def __init__(self, xdata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
xi = np.arange(len(self.xdata))
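        # self.xdata presumably holds successive fringe positions: regressing
        # the fringe index against position gives a line whose slope is the
        # fringe density (inverse of the mean fringe spacing), reported with
        # its standard error below.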
slope, intercept, r_value, p_value, std_err = stats.linregress(self.xdata,xi)
fitline = slope*self.xdata+intercept
self.ax.plot(self.xdata, fitline, 'r-',self.xdata,xi, 'bo')
self.ax.axis([self.xdata.min(),self.xdata.max(),xi.min()-1, xi.max()+1])
self.ax.text(0.3, 0.9,'Slope = %.4f +- %.4f' % (slope, std_err),
horizontalalignment='center',
verticalalignment='center',
transform = self.ax.transAxes,
color='red')
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
class PopUpImage(object):
def __init__(self, xdata, ydata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
self.ydata = ydata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.canvas.mpl_connect("button_press_event",self.on_press)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
self.ax.plot(self.xdata, self.ydata, 'b-', lw=2)
self.textes = []
self.plots = []
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
def on_press(self, event):
if event.inaxes == self.ax and event.button==3:
self.clear_notes()
xc = event.xdata
#***** Find the closest x value *****
residuel = self.xdata - xc
residuel = np.abs(residuel)
j = np.argmin(residuel)
#y = self.ydata[i-1:i+1]
#yc= y.max()
#j = np.where(self.ydata == yc)
#j = j[0][0]
xc= self.xdata[j]
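#***** Fit a pseudo-Voigt over a small window (6 points) centred on the clicked position *****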
x_fit = self.xdata[j-3:j+3]
y_fit = self.ydata[j-3:j+3]
fitted_param, fitted_data = fit(x_fit, y_fit, xc, True)
x_fit = np.linspace(x_fit.min(), x_fit.max(), 200)
y_fit = psdVoigt(fitted_param, x_fit)
period = fitted_param['xc'].value
std_err= fitted_param['xc'].stderr
p = self.ax.plot(x_fit, y_fit,'r-')
p2 = self.ax.axvline(period,color='green',lw=2)
txt=self.ax.text(0.05, 0.9, 'Period = %.4f +- %.4f (nm)'%(period, std_err), transform = self.ax.transAxes, color='red')
self.textes.append(txt)
self.plots.append(p[0])
self.plots.append(p2)
elif event.inaxes == self.ax and event.button==2:
dif = np.diff(self.ydata)
dif = dif/dif.max()
p3=self.ax.plot(dif,'r-')
self.plots.append(p3[0])
self.canvas.draw()
def clear_notes(self):
if len(self.textes)>0:
for t in self.textes:
t.remove()
if len(self.plots)>0:
for p in self.plots:
p.remove()
self.textes = []
self.plots = []
class MyMainWindow(gtk.Window):
def __init__(self):
super(MyMainWindow, self).__init__()
self.set_title("MCA Reciprocal space map processing. Version %s - last update on: %s"%(__version__,__date__))
self.set_size_request(1200,900)
self.set_position(gtk.WIN_POS_CENTER)
self.set_border_width(10)
self.toolbar = gtk.Toolbar()
self.toolbar.set_style(gtk.TOOLBAR_ICONS)
self.refreshtb = gtk.ToolButton(gtk.STOCK_REFRESH)
self.opentb = gtk.ToolButton(gtk.STOCK_OPEN)
self.sep = gtk.SeparatorToolItem()
self.aspecttb = gtk.ToolButton(gtk.STOCK_PAGE_SETUP)
self.quittb = gtk.ToolButton(gtk.STOCK_QUIT)
self.toolbar.insert(self.opentb, 0)
self.toolbar.insert(self.refreshtb, 1)
self.toolbar.insert(self.aspecttb, 2)
self.toolbar.insert(self.sep, 3)
self.toolbar.insert(self.quittb, 4)
self.tooltips = gtk.Tooltips()
self.tooltips.set_tip(self.refreshtb,"Reload data files")
self.tooltips.set_tip(self.opentb,"Open a folder containing HDF5 (*.h5) data files")
self.tooltips.set_tip(self.aspecttb,"Change the graph's aspect ratio")
self.tooltips.set_tip(self.quittb,"Quit the program")
self.opentb.connect("clicked", self.choose_folder)
self.refreshtb.connect("clicked",self.folder_update)
self.aspecttb.connect("clicked",self.change_aspect_ratio)
self.quittb.connect("clicked", gtk.main_quit)
self.graph_aspect = False #Flag to change the aspect ratio of the graph, False = Auto, True = equal
############################# BOXES ###############################################
vbox = gtk.VBox()
vbox.pack_start(self.toolbar,False,False,0)
hbox=gtk.HBox()
######################### TREE VIEW #############################################
self.sw = gtk.ScrolledWindow()
self.sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
hbox.pack_start(self.sw, False, False, 0)
self.store=[]
self.list_store = gtk.ListStore(str)
self.treeView = gtk.TreeView(self.list_store)
self.treeView.connect("row-activated",self.on_changed_rsm)
rendererText = gtk.CellRendererText()
self.TVcolumn = gtk.TreeViewColumn("RSM data files", rendererText, text=0)
self.TVcolumn.set_sort_column_id(0)
self.treeView.append_column(self.TVcolumn)
self.sw.add(self.treeView)
self.GUI_current_folder = self.DATA_current_folder = os.getcwd()
#******************************************************************
# Notebooks
#******************************************************************
self.notebook = gtk.Notebook()
self.page_GUI = gtk.HBox()
self.page_conversion = gtk.VBox()
self.page_XRDML = gtk.VBox()
###################################### FIGURES ######################################
#self.page_single_figure = gtk.HBox()
self.midle_panel = gtk.VBox()
self.rsm = ""
self.rsm_choosen = ""
self.my_notes = []
self.lines = []
self.points=[]
self.polygons=[]
self.fig=Figure(dpi=100)
## Draw line for arbitrary profiles
self.arb_lines_X = []
self.arb_lines_Y = []
self.arb_line_points = 0
#self.ax = self.fig.add_subplot(111)
self.ax = self.fig.add_axes([0.1,0.2,0.7,0.7])
self.fig.subplots_adjust(left=0.1,bottom=0.20, top=0.90)
self.vmin = 0
self.vmax = 1000
self.vmax_range = self.vmax
self.canvas = FigureCanvas(self.fig)
Fig_hbox = gtk.HBox()
self.Export_HQ_Image_btn = gtk.Button("Save HQ image")
self.Export_HQ_Image_btn.connect("clicked", self.Export_HQ_Image)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#Global color bar
self.cax = self.fig.add_axes([0.85, 0.20, 0.03, 0.70])#left,bottom,width,height
#self.canvas.mpl_connect("motion_notify_event",self.on_motion)
self.canvas.mpl_connect("button_press_event",self.on_press)
#self.canvas.mpl_connect("button_release_event",self.on_release)
self.mouse_moved = False #If the user clicks without moving the mouse: do not zoom the image
Fig_hbox.pack_start(self.Export_HQ_Image_btn, False, False, 0)
Fig_hbox.pack_start(self.main_figure_navBar, True,True, 0)
self.midle_panel.pack_start(Fig_hbox, False,False, 0)
self.midle_panel.pack_start(self.canvas, True,True, 2)
self.page_GUI.pack_start(self.midle_panel, True,True, 0)
#hbox.pack_start(self.midle_panel, True,True, 0)
########################################## RIGHT PANEL ###################
self.right_panel = gtk.VBox(False,0)
self.linear_scale_btn = gtk.ToggleButton("Linear scale")
self.linear_scale_btn.set_usize(30,0)
self.linear_scale_btn.connect("toggled",self.log_update)
self.log_scale=0
#self.wavelength_txt = gtk.Label("Energy (eV)")
##self.wavelength_txt.set_alignment(1,0.5)
#self.wavelength_field = gtk.Entry()
#self.wavelength_field.set_text("8333")
#self.wavelength_field.set_usize(30,0)
#self.lattice_const_txt = gtk.Label("Lattice constant (nm)")
#self.lattice_const_txt.set_alignment(1,0.5)
#self.lattice_const = gtk.Entry()
#self.lattice_const.set_text("0.5431")
#self.lattice_const.set_usize(30,0)
self.int_range_txt = gtk.Label("Integration range")
self.int_range_txt.set_alignment(1,0.5)
self.int_range = gtk.Entry()
self.int_range.set_text("0.05")
self.int_range.set_usize(30,0)
self.fitting_range_txt = gtk.Label("Fitting range")
self.fitting_range_txt.set_alignment(1,0.5)
self.fitting_range = gtk.Entry()
self.fitting_range.set_text("0.1")
self.fitting_range.set_usize(30,0)
# ********** Set the default values for configuration *************
self.plotXYprofiles_btn = gtk.RadioButton(None,"Plot X,Y profiles")
self.plotXYprofiles_btn.set_active(False)
self.arbitrary_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"Arbitrary profiles")
self.rectangle_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"ROI projection")
self.option_table = gtk.Table(4,3,False)#Pack the options
self.option_table.attach(self.linear_scale_btn, 0,1,0,1)
self.option_table.attach(self.plotXYprofiles_btn,0,1,1,2)
self.option_table.attach(self.arbitrary_profiles_btn,0,1,2,3)
self.option_table.attach(self.rectangle_profiles_btn,0,1,3,4)
# self.option_table.attach(self.wavelength_txt,1,2,0,1)
# self.option_table.attach(self.wavelength_field,2,3,0,1)
# self.option_table.attach(self.lattice_const_txt,1,2,1,2)
# self.option_table.attach(self.lattice_const, 2,3,1,2)
self.option_table.attach(self.int_range_txt, 1,2,0,1)
self.option_table.attach(self.int_range, 2,3,0,1)
self.option_table.attach(self.fitting_range_txt, 1,2,1,2)
self.option_table.attach(self.fitting_range, 2,3,1,2)
### Options for profile plots
self.profiles_log_btn = gtk.ToggleButton("Y-Log")
self.profiles_log_btn.connect("toggled",self.profiles_update)
self.profiles_export_data_btn = gtk.Button("Export data")
self.profiles_export_data_btn.connect("clicked",self.profiles_export)
self.profiles_option_box = gtk.HBox(False,0)
self.profiles_option_box.pack_start(self.profiles_log_btn, False, False, 0)
self.profiles_option_box.pack_start(self.profiles_export_data_btn, False, False, 0)
### Figure of profiles plot
self.profiles_fringes = []
self.fig_profiles = Figure()
self.profiles_ax1 = self.fig_profiles.add_subplot(211)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2 = self.fig_profiles.add_subplot(212)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas = FigureCanvas(self.fig_profiles)
self.profiles_canvas.set_size_request(450,50)
self.profiles_canvas.mpl_connect("button_press_event",self.profile_press)
self.profiles_navBar = NavigationToolbar(self.profiles_canvas, self)
self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
#### Results of fitted curves
self.fit_results_table = gtk.Table(7,3, False)
title = gtk.Label("Fitted results:")
self.chi_title = gtk.Label("Qz profile")
self.tth_title = gtk.Label("Qx profile")
y0 = gtk.Label("y0:")
xc = gtk.Label("xc:")
A = gtk.Label("A:")
w = gtk.Label("FWHM:")
mu = gtk.Label("mu:")
y0.set_alignment(0,0.5)
xc.set_alignment(0,0.5)
A.set_alignment(0,0.5)
w.set_alignment(0,0.5)
mu.set_alignment(0,0.5)
self.Qz_fitted_y0 = gtk.Label()
self.Qz_fitted_xc = gtk.Label()
self.Qz_fitted_A = gtk.Label()
self.Qz_fitted_w = gtk.Label()
self.Qz_fitted_mu = gtk.Label()
self.Qx_fitted_y0 = gtk.Label()
self.Qx_fitted_xc = gtk.Label()
self.Qx_fitted_A = gtk.Label()
self.Qx_fitted_w = gtk.Label()
self.Qx_fitted_mu = gtk.Label()
self.fit_results_table.attach(title,0,3,0,1)
self.fit_results_table.attach(self.chi_title,1,2,1,2)
self.fit_results_table.attach(self.tth_title,2,3,1,2)
self.fit_results_table.attach(y0,0,1,2,3)
self.fit_results_table.attach(xc,0,1,3,4)
self.fit_results_table.attach(A,0,1,4,5)
self.fit_results_table.attach(w,0,1,5,6)
self.fit_results_table.attach(mu,0,1,6,7)
self.fit_results_table.attach(self.Qz_fitted_y0,1,2,2,3)
self.fit_results_table.attach(self.Qz_fitted_xc,1,2,3,4)
self.fit_results_table.attach(self.Qz_fitted_A,1,2,4,5)
self.fit_results_table.attach(self.Qz_fitted_w,1,2,5,6)
self.fit_results_table.attach(self.Qz_fitted_mu,1,2,6,7)
self.fit_results_table.attach(self.Qx_fitted_y0,2,3,2,3)
self.fit_results_table.attach(self.Qx_fitted_xc,2,3,3,4)
self.fit_results_table.attach(self.Qx_fitted_A,2,3,4,5)
self.fit_results_table.attach(self.Qx_fitted_w,2,3,5,6)
self.fit_results_table.attach(self.Qx_fitted_mu,2,3,6,7)
#### PACK the right panel
self.right_panel.pack_start(self.option_table, False, False, 0)
self.right_panel.pack_start(self.profiles_option_box,False,False,0)
self.right_panel.pack_start(self.profiles_navBar,False,False,0)
self.right_panel.pack_start(self.profiles_canvas,True,True,0)
self.right_panel.pack_start(self.fit_results_table, False, False, 0)
self.page_GUI.pack_end(self.right_panel,False, False,5)
#********************************************************************
# Conversion data SPEC to HDF page
#********************************************************************
self.conv_box = gtk.VBox()
self.box1 = gtk.HBox()
self.det_frame = gtk.Frame()
self.det_frame.set_label("Detector Vantec")
self.det_frame.set_label_align(0.5,0.5)
self.exp_frame = gtk.Frame()
self.exp_frame.set_label("Experiment parameters")
self.exp_frame.set_label_align(0.5,0.5)
self.conv_frame = gtk.Frame()
self.conv_frame.set_label("Data conversion: SPEC-HDF5")
self.conv_frame.set_label_align(0.5,0.5)
#self.conv_frame.set_alignment(0.5,0.5)
#********************************************************************
# Detector parameters
#********************************************************************
self.det_table = gtk.Table(6,2,False)
self.t1 = gtk.Label("Detector size (mm)")
self.t2 = gtk.Label("Number of channels")
self.t3 = gtk.Label("Center channel")
self.t4 = gtk.Label("Channels/Degree")
self.t5 = gtk.Label("ROI (from-to)")
self.t6 = gtk.Label("Orientation")
self.t1.set_alignment(0,0.5)
self.t2.set_alignment(0,0.5)
self.t3.set_alignment(0,0.5)
self.t4.set_alignment(0,0.5)
self.t5.set_alignment(0,0.5)
self.t6.set_alignment(0,0.5)
self.t1_entry = gtk.Entry()
self.t1_entry.set_text("50")
self.t2_entry = gtk.Entry()
self.t2_entry.set_text("2048")
self.t3_entry = gtk.Entry()
self.t3_entry.set_text("819.87")
self.t4_entry = gtk.Entry()
self.t4_entry.set_text("211.012")
self.small_box = gtk.HBox()
self.t5_label = gtk.Label("-")
self.t5_entry1 = gtk.Entry()
self.t5_entry1.set_text("40")
self.t5_entry2 = gtk.Entry()
self.t5_entry2.set_text("1300")
self.small_box.pack_start(self.t5_entry1,True, True,0)
self.small_box.pack_start(self.t5_label,True, True,0)
self.small_box.pack_start(self.t5_entry2,True, True,0)
self.t6_entry = gtk.combo_box_new_text()
self.t6_entry.append_text("Up (zero on the bottom)")
self.t6_entry.append_text("Down (zero on the top)")
self.t6_entry.set_active(1)
self.det_table.attach(self.t1, 0,1,0,1)
self.det_table.attach(self.t2, 0,1,1,2)
self.det_table.attach(self.t3, 0,1,2,3)
self.det_table.attach(self.t4, 0,1,3,4)
self.det_table.attach(self.t5, 0,1,4,5)
self.det_table.attach(self.t6, 0,1,5,6)
self.det_table.attach(self.t1_entry, 1,2,0,1)
self.det_table.attach(self.t2_entry, 1,2,1,2)
self.det_table.attach(self.t3_entry, 1,2,2,3)
self.det_table.attach(self.t4_entry, 1,2,3,4)
self.det_table.attach(self.small_box, 1,2,4,5)
self.det_table.attach(self.t6_entry, 1,2,5,6)
self.det_table_align = gtk.Alignment()
self.det_table_align.set_padding(15,10,10,10)
self.det_table_align.set(0.5, 0.5, 1.0, 1.0)
self.det_table_align.add(self.det_table)
self.det_frame.add(self.det_table_align)
#********************************************************************
# Experiment parameters
#********************************************************************
self.exp_table = gtk.Table(6,2,False)
self.e1 = gtk.Label("Substrate material:")
self.e1_other = gtk.Label("If other:")
self.e2 = gtk.Label("Energy (eV)")
self.e3 = gtk.Label("Attenuation coefficient file")
self.e4 = gtk.Label("Foil colunm name (in SPEC file)")
self.e5 = gtk.Label("Monitor colunm name (in SPEC file)")
self.e6 = gtk.Label("Reference monitor (for normalization)")
self.e1.set_alignment(0,0.5)
self.e1_other.set_alignment(1,0.5)
self.e2.set_alignment(0,0.5)
self.e3.set_alignment(0,0.5)
self.e4.set_alignment(0,0.5)
self.e5.set_alignment(0,0.5)
self.e6.set_alignment(0,0.5)
#self.e1_entry = gtk.Label("Si for now")
self.e1_entry = gtk.combo_box_new_text()
self.e1_entry.append_text("-- other")
self.e1_entry.append_text("Si")
self.e1_entry.append_text("Ge")
self.e1_entry.append_text("GaAs")
self.e1_entry.append_text("GaP")
self.e1_entry.append_text("GaSb")
self.e1_entry.append_text("InAs")
self.e1_entry.append_text("InP")
self.e1_entry.append_text("InSb")
self.e1_entry.set_active(1)
self.e1_entry_other = gtk.Entry()
self.e1_entry_other.set_text("")
self.e2_entry = gtk.Entry()
self.e2_entry.set_text("8333")
self.e3_box = gtk.HBox()
self.e3_path =gtk.Entry()
self.e3_browse = gtk.Button("Browse")
self.e3_browse.connect("clicked", self.select_file, self.e3_path, "A")
self.e3_box.pack_start(self.e3_path, False, False, 0)
self.e3_box.pack_start(self.e3_browse, False, False, 0)
self.e4_entry = gtk.Entry()
self.e4_entry.set_text("pfoil")
self.e5_entry = gtk.Entry()
self.e5_entry.set_text("vct3")
self.e6_entry = gtk.Entry()
self.e6_entry.set_text("1e6")
substrate_box1 = gtk.HBox()
substrate_box2 = gtk.HBox()
substrate_box1.pack_start(self.e1, False, False, 0)
substrate_box1.pack_start(self.e1_entry, False, False, 0)
substrate_box2.pack_start(self.e1_other, False, False, 0)
substrate_box2.pack_start(self.e1_entry_other, False, False, 0)
self.exp_table.attach(substrate_box1, 0,1,0,1)
self.exp_table.attach(self.e2, 0,1,1,2)
self.exp_table.attach(self.e3, 0,1,2,3)
self.exp_table.attach(self.e4, 0,1,3,4)
self.exp_table.attach(self.e5, 0,1,4,5)
self.exp_table.attach(self.e6, 0,1,5,6)
self.exp_table.attach(substrate_box2, 1,2,0,1)
self.exp_table.attach(self.e2_entry, 1,2,1,2)
self.exp_table.attach(self.e3_box, 1,2,2,3)
self.exp_table.attach(self.e4_entry, 1,2,3,4)
self.exp_table.attach(self.e5_entry, 1,2,4,5)
self.exp_table.attach(self.e6_entry, 1,2,5,6)
self.exp_table_align = gtk.Alignment()
self.exp_table_align.set_padding(15,10,10,10)
self.exp_table_align.set(0.5, 0.5, 1.0, 1.0)
self.exp_table_align.add(self.exp_table)
self.exp_frame.add(self.exp_table_align)
#********************************************************************
# Data conversion information
#********************************************************************
self.conv_table = gtk.Table(6,3,False)
self.c1 = gtk.Label("Spec file")
self.c2 = gtk.Label("MCA file")
self.c3 = gtk.Label("Destination folder")
self.c4 = gtk.Label("Scan number (from-to)")
self.c5 = gtk.Label("Description for each RSM (optional-separate by comma)")
self.c6 = gtk.Label("Problem of foil delay (foil[n]-->data[n+1])")
self.c1.set_alignment(0,0.5)
self.c2.set_alignment(0,0.5)
self.c3.set_alignment(0,0.5)
self.c4.set_alignment(0,0.5)
self.c5.set_alignment(0,0.5)
self.c6.set_alignment(0,0.5)
self.c1_entry1 = gtk.Entry()
self.c2_entry1 = gtk.Entry()
self.c3_entry1 = gtk.Entry()
self.c4_entry1 = gtk.Entry()
self.c5_entry1 = gtk.Entry()
self.c5_entry1.set_text("")
self.c6_entry = gtk.CheckButton()
self.c1_entry2 = gtk.Button("Browse SPEC")
self.c2_entry2 = gtk.Button("Browse MCA")
self.c3_entry2 = gtk.Button("Browse Folder")
self.c4_entry2 = gtk.Entry()
self.c1_entry2.connect("clicked", self.select_file, self.c1_entry1, "S")
self.c2_entry2.connect("clicked", self.select_file, self.c2_entry1, "M")
self.c3_entry2.connect("clicked", self.select_folder, self.c3_entry1, "D")
self.conv_table.attach(self.c1, 0,1,0,1)
self.conv_table.attach(self.c2, 0,1,1,2)
self.conv_table.attach(self.c3, 0,1,2,3)
self.conv_table.attach(self.c4, 0,1,3,4)
self.conv_table.attach(self.c5, 0,1,4,5)
self.conv_table.attach(self.c6, 0,1,5,6)
self.conv_table.attach(self.c1_entry1, 1,2,0,1)
self.conv_table.attach(self.c2_entry1, 1,2,1,2)
self.conv_table.attach(self.c3_entry1, 1,2,2,3)
self.conv_table.attach(self.c4_entry1, 1,2,3,4)
self.conv_table.attach(self.c5_entry1, 1,3,4,5)
self.conv_table.attach(self.c6_entry, 1,2,5,6)
self.conv_table.attach(self.c1_entry2, 2,3,0,1)
self.conv_table.attach(self.c2_entry2, 2,3,1,2)
self.conv_table.attach(self.c3_entry2, 2,3,2,3)
self.conv_table.attach(self.c4_entry2, 2,3,3,4)
self.conv_table_align = gtk.Alignment()
self.conv_table_align.set_padding(15,10,10,10)
self.conv_table_align.set(0.5, 0.5, 1.0, 1.0)
self.conv_table_align.add(self.conv_table)
self.conv_frame.add(self.conv_table_align)
#********************************************************************
# The RUN button
#********************************************************************
self.run_conversion = gtk.Button("Execute")
self.run_conversion.connect("clicked", self.spec2HDF)
self.run_conversion.set_size_request(50,30)
self.show_info = gtk.Label()
#********************************************************************
# Pack the frames
#********************************************************************
self.box1.pack_start(self.det_frame,padding=15)
self.box1.pack_end(self.exp_frame, padding =15)
self.conv_box.pack_start(self.box1,padding=15)
self.conv_box.pack_start(self.conv_frame,padding=5)
self.conv_box.pack_start(self.run_conversion, False,False,10)
self.conv_box.pack_start(self.show_info, False,False,10)
self.page_conversion.pack_start(self.conv_box,False, False,20)
#********************************************************************
# Conversion XRDML data to HDF
#********************************************************************
self.XRDML_conv_box = gtk.VBox()
self.Instrument_table = gtk.Table(1,4,True)
self.Inst_txt = gtk.Label("Instrument:")
self.Inst_txt.set_alignment(0,0.5)
self.Instrument = gtk.combo_box_new_text()
self.Instrument.append_text("Bruker")
self.Instrument.append_text("PANalytical")
self.Instrument.set_active(0)
self.Instrument_table.attach(self.Inst_txt,0,1,0,1)
self.Instrument_table.attach(self.Instrument, 1,2,0,1)
self.Instrument.connect("changed",self.Change_Lab_Instrument)
self.choosen_instrument = self.Instrument.get_active_text()
self.XRDML_table = gtk.Table(7,4,True)
self.XRDML_tooltip = gtk.Tooltips()
self.XRDML_substrate_txt = gtk.Label("Substrate material:")
self.XRDML_substrate_other_txt = gtk.Label("If other:")
self.XRDML_substrate_inplane_txt= gtk.Label("In-plane direction (e.g. 1 1 0) - optional")
self.XRDML_substrate_outplane_txt= gtk.Label("Out-of-plane direction (e.g. 0 0 1) - optional")
self.XRDML_reflection_txt = gtk.Label("Reflection (H K L) - optional:")
self.XRDML_energy_txt = gtk.Label("Energy (eV) - optional:")
self.XRDML_description_txt = gtk.Label("Description of the sample:")
self.XRDML_xrdml_file_txt = gtk.Label("Select RAW file:")
self.XRDML_destination_txt = gtk.Label("Select a destination folder:")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_txt, "Substrate material")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_other_txt, "The substrate material, e.g. Al, SiO2, CdTe, GaN,...")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_inplane_txt, "The substrate in-plane and out-of-plane directions - used for calculation of the orientation matrix.")
self.XRDML_tooltip.set_tip(self.XRDML_reflection_txt, "H K L, separated by spaces, e.g. 2 2 4 (0 0 0 for an XRR map). This is used for offset correction.")
self.XRDML_tooltip.set_tip(self.XRDML_energy_txt, "If empty, the default Cu K_alpha_1 will be used.")
self.XRDML_tooltip.set_tip(self.XRDML_description_txt, "Description of the sample; this will be the name of the converted file. If empty, it will be named 'RSM.h5'")
self.XRDML_tooltip.set_tip(self.XRDML_xrdml_file_txt, "Select the data file recorded by the chosen equipment")
self.XRDML_tooltip.set_tip(self.XRDML_destination_txt, "Select a destination folder to store the converted file.")
self.XRDML_substrate_txt.set_alignment(0,0.5)
self.XRDML_substrate_other_txt.set_alignment(1,0.5)
self.XRDML_substrate_inplane_txt.set_alignment(0,0.5)
self.XRDML_substrate_outplane_txt.set_alignment(1,0.5)
self.XRDML_reflection_txt.set_alignment(0,0.5)
self.XRDML_energy_txt.set_alignment(0,0.5)
self.XRDML_description_txt.set_alignment(0,0.5)
self.XRDML_xrdml_file_txt.set_alignment(0,0.5)
self.XRDML_destination_txt.set_alignment(0,0.5)
self.XRDML_substrate = gtk.combo_box_new_text()
self.XRDML_substrate.append_text("-- other")
self.XRDML_substrate.append_text("Si")
self.XRDML_substrate.append_text("Ge")
self.XRDML_substrate.append_text("GaAs")
self.XRDML_substrate.append_text("GaP")
self.XRDML_substrate.append_text("GaSb")
self.XRDML_substrate.append_text("InAs")
self.XRDML_substrate.append_text("InP")
self.XRDML_substrate.append_text("InSb")
self.XRDML_substrate.set_active(0)
self.XRDML_substrate_other = gtk.Entry()
self.XRDML_substrate_other.set_text("")
self.XRDML_substrate_inplane = gtk.Entry()
self.XRDML_substrate_inplane.set_text("")
self.XRDML_substrate_outplane = gtk.Entry()
self.XRDML_substrate_outplane.set_text("")
self.XRDML_reflection = gtk.Entry()
self.XRDML_reflection.set_text("")
self.XRDML_energy = gtk.Entry()
self.XRDML_energy.set_text("")
self.XRDML_description = gtk.Entry()
self.XRDML_description.set_text("")
self.XRDML_xrdml_file_path = gtk.Entry()
self.XRDML_destination_path = gtk.Entry()
self.XRDML_xrdml_file_browse = gtk.Button("Browse RAW file")
self.XRDML_destination_browse= gtk.Button("Browse destination folder")
self.XRDML_xrdml_file_browse.connect("clicked", self.select_file, self.XRDML_xrdml_file_path, "S")
self.XRDML_destination_browse.connect("clicked", self.select_folder, self.XRDML_destination_path, "D")
self.XRDML_table.attach(self.XRDML_substrate_txt, 0,1,0,1)
self.XRDML_table.attach(self.XRDML_substrate, 1,2,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other_txt, 2,3,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other, 3,4,0,1)
self.XRDML_table.attach(self.XRDML_substrate_inplane_txt, 0,1,1,2)
self.XRDML_table.attach(self.XRDML_substrate_inplane, 1,2,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane_txt, 2,3,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane, 3,4,1,2)
self.XRDML_table.attach(self.XRDML_reflection_txt, 0,1,2,3)
self.XRDML_table.attach(self.XRDML_reflection, 1,2,2,3)
self.XRDML_table.attach(self.XRDML_energy_txt,0,1,3,4)
self.XRDML_table.attach(self.XRDML_energy, 1,2,3,4)
self.XRDML_table.attach(self.XRDML_description_txt, 0,1,4,5)
self.XRDML_table.attach(self.XRDML_description, 1,2,4,5)
self.XRDML_table.attach(self.XRDML_xrdml_file_txt, 0,1,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_path, 1,2,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_browse, 2,3,5,6)
self.XRDML_table.attach(self.XRDML_destination_txt, 0,1,6,7)
self.XRDML_table.attach(self.XRDML_destination_path, 1,2,6,7)
self.XRDML_table.attach(self.XRDML_destination_browse, 2,3,6,7)
#********************************************************************
# The RUN button
#********************************************************************
self.XRDML_run = gtk.Button("Execute")
self.XRDML_run.connect("clicked", self.Convert_Lab_Source)
self.XRDML_run.set_size_request(50,30)
self.XRDML_show_info = gtk.Label()
#********************************************************************
# Pack the XRDML options
#********************************************************************
self.XRDML_conv_box.pack_start(self.Instrument_table, False, False,5)
self.XRDML_conv_box.pack_start(self.XRDML_table, False, False, 10)
self.XRDML_conv_box.pack_start(self.XRDML_run, False, False, 5)
self.XRDML_conv_box.pack_start(self.XRDML_show_info, False,False,10)
self.page_XRDML.pack_start(self.XRDML_conv_box,False, False,20)
#********************************************************************
# Pack the notebook
#********************************************************************
self.notebook.append_page(self.page_GUI, gtk.Label("RSM GUI"))
self.notebook.append_page(self.page_conversion, gtk.Label("ESRF-MCA spec file (Vantec)"))
self.notebook.append_page(self.page_XRDML, gtk.Label("Lab instruments"))
hbox.pack_start(self.notebook)
vbox.pack_start(hbox,True,True,0)
############################### Sliders ######################################
#sld_box = gtk.Fixed()
sld_box = gtk.HBox(False,2)
self.vmin_txt = gtk.Label("Vmin")
self.vmin_txt.set_alignment(0,0.5)
#self.vmin_txt.set_justify(gtk.JUSTIFY_CENTER)
self.vmax_txt = gtk.Label("Vmax")
self.vmax_txt.set_alignment(0,0.5)
#self.vmax_txt.set_justify(gtk.JUSTIFY_CENTER)
self.sld_vmin = gtk.HScale()
self.sld_vmax = gtk.HScale()
self.sld_vmin.set_size_request(200,25)
self.sld_vmax.set_size_request(200,25)
self.sld_vmin.set_range(0,self.vmax)
self.sld_vmax.set_range(0,self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(0)
self.sld_vmin.connect('value-changed',self.scale_update)
self.sld_vmax.connect('value-changed',self.scale_update)
vmax_spin_adj = gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmax_spin_btn = gtk.SpinButton(vmax_spin_adj,1,1)
self.vmax_spin_btn.set_numeric(True)
self.vmax_spin_btn.set_wrap(True)
self.vmax_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmax_spin_btn.connect('value-changed',self.scale_update_spin)
vmin_spin_adj = gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmin_spin_btn = gtk.SpinButton(vmin_spin_adj,1,1)
self.vmin_spin_btn.set_numeric(True)
self.vmin_spin_btn.set_wrap(True)
self.vmin_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmin_spin_btn.connect('value-changed',self.scale_update_spin)
sld_box.pack_start(self.vmin_txt,False,False,0)
sld_box.pack_start(self.sld_vmin,False,False,0)
sld_box.pack_start(self.vmin_spin_btn,False,False,0)
sld_box.pack_start(self.vmax_txt,False,False,0)
sld_box.pack_start(self.sld_vmax,False,False,0)
sld_box.pack_start(self.vmax_spin_btn,False,False,0)
#sld_box.pack_start(self.slider_reset_btn,False,False,0)
vbox.pack_start(sld_box,False,False,3)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
#########################################################################################################################
def format_coord(self, x, y):
#***** Add intensity information into the navigation toolbar *******************************
numrows, numcols = (self.gridder.data.T).shape
col,row = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = self.gridder.data.T[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
def pro_format_coord(self,x,y):
return 'x=%.4f, y=%.1f'%(x,y)
def init_image(self,log=False):
self.ax.cla()
self.cax.cla()
#print "Initialize image ..."
#
#self.clevels = np.linspace(self.vmin, self.vmax, 100)
if log:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T),vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T), self.clevels, vmin=self.vmin, vmax=self.vmax)
else:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T,vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T, self.clevels, vmin=self.vmin, vmax=self.vmax)
self.img.cmap.set_under(alpha=0)
self.ax.axis([self.gridder.xaxis.min(), self.gridder.xaxis.max(), self.gridder.yaxis.min(), self.gridder.yaxis.max()])
#self.ax.set_aspect('equal')
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
self.ax.yaxis.label.set_size(20)
self.ax.xaxis.label.set_size(20)
self.ax.set_title(self.rsm_description,fontsize=20)
self.ax.format_coord = self.format_coord
self.cb = self.fig.colorbar(self.img, cax = self.cax, format="%.1f")#format=fm
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Intensity)\ [arb.\ units]$',fontsize=20)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=20)
self.cb.locator = MaxNLocator(nbins=6)
#self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#print "Image is initialized."
def change_aspect_ratio(self,w):
self.graph_aspect = not (self.graph_aspect)
if self.graph_aspect == True:
self.ax.set_aspect('equal')
else:
self.ax.set_aspect('auto')
self.canvas.draw()
def on_changed_rsm(self,widget,row,col):
#print "************Change RSM*************"
gc.collect() #Clear unused variables to gain memory
#************** Reminder of the structure of these HDF5 files:
# ************* file=[scan_id={'eta'=[data], '2theta'=[data], 'intensity'=[data], 'description'='RSM 004 ...'}]
self.clear_notes()
#self.init_image()
model = widget.get_model()
self.rsm_choosen = model[row][0]
self.rsm = join(self.GUI_current_folder,self.rsm_choosen)#file path
self.rsm_info = h5.File(self.rsm,'r')#HDF5 object that collects all information of this scan
#self.ax.set_title(self.rsm_choosen,fontsize=20)
### Data Loading ##
groups = self.rsm_info.keys()
scan = groups[0]
self.scan = self.rsm_info[scan]
self.data = self.scan.get('intensity').value
self.Qx = self.scan.get('Qx').value
self.Qy = self.scan.get('Qy').value
self.Qz = self.scan.get('Qz').value
self.rsm_description = self.scan.get('description').value
self.rsm_info.close()
#print "Data are successfully loaded."
self.gridder = xu.Gridder2D(self.data.shape[0],self.data.shape[1])
#print "Gridder is calculated."
# MM = self.data.max()
# M = np.log10(MM)
# data = flat_data(self.data,0,M)
self.gridder(self.Qx, self.Qz, self.data)
self.data = self.gridder.data.T
self.vmin=self.data.min()
self.vmax=self.data.max()
#print "Starting scale_plot()"
self.scale_plot()
#self.slider_update()
def scale_plot(self):
#print "Scale_plot() is called."
data = self.data.copy()
#self.init_image()
if self.linear_scale_btn.get_active():
self.linear_scale_btn.set_label("--> Linear scale")
data = np.log10(data)
#print data.max()
self.init_image(log=True)
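# Convert the current slider values to the log scale so that the displayed intensity range is preserved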
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
self.vmax = np.log10(actual_vmax) if self.log_scale == 0 else actual_vmax
if actual_vmin == 0:
self.vmin=0
elif actual_vmin >0:
self.vmin = np.log10(actual_vmin) if self.log_scale == 0 else actual_vmin
self.vmax_range = data.max()
self.log_scale = 1
#log=True
else:
self.linear_scale_btn.set_label("--> Log scale")
self.init_image(log=False)
#print "Calculating min max and update slider..."
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
#print "Actual vmax: ",actual_vmax
if self.log_scale == 1:
self.vmax = np.power(10.,actual_vmax)
else:
self.vmax = actual_vmax
self.vmax_range = data.max()
if actual_vmin ==0:
self.vmin = 0
elif actual_vmin>0:
if self.log_scale == 0:
self.vmin = actual_vmin
elif self.log_scale == 1:
self.vmin = np.power(10,actual_vmin)
self.log_scale = 0
#log=False
#print "Min max are calculated."
self.sld_vmax.set_range(-6,self.vmax_range)
self.sld_vmin.set_range(-6,self.vmax_range)
#self.init_image(log)
self.slider_update()
def log_update(self,widget):
self.scale_plot()
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Counts\ per\ second)\ [arb.\ units]$',fontsize=18)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=18)
#self.slider_update()
def scale_update(self,widget):
#print "Scale_update() is called."
self.vmin = self.sld_vmin.get_value()
self.vmax = self.sld_vmax.get_value()
self.vmin_spin_btn.set_value(self.vmin)
self.vmax_spin_btn.set_value(self.vmax)
self.slider_update()
def scale_update_spin(self,widget):
#print "Spin_update() is called"
self.vmin = self.vmin_spin_btn.get_value()
self.vmax = self.vmax_spin_btn.get_value()
self.slider_update()
def slider_update(self):
#print "slider_update() is called"
#self.img.set_clim(self.vmin, self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(self.vmin)
if self.linear_scale_btn.get_active():
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.1, 1.0, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.1, 1.0, 0))
else:
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 10, 100, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 10, 100, 0))
#self.vmax_spin_btn.update()
self.img.set_clim(self.vmin, self.vmax)
self.ax.relim()
self.canvas.draw()
#print "slider_update() stoped."
def choose_folder(self, w):
dialog = gtk.FileChooserDialog(title="Select a data folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.GUI_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
folder = folder.decode('utf8')
folder_basename = folder.split("/")[-1]
#print folder_basename
self.store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.GUI_current_folder = folder
#print store
if len(self.store)>0:
self.list_store.clear()
for i in self.store:
self.list_store.append([i])
self.TVcolumn.set_title(folder_basename)
else:
pass
else:
pass
dialog.destroy()
def folder_update(self, w):
folder = self.GUI_current_folder
if folder != os.getcwd():
store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.store=[]
self.list_store.clear()
for i in store:
self.list_store.append([i])
self.store.append(i)
def arbitrary_line_cut(self, x, y):
#**** num: integer - number of points to be extracted
#**** convert Q coordinates to pixel coordinates
x0, y0 = xu.analysis.line_cuts.getindex(x[0], y[0], self.gridder.xaxis, self.gridder.yaxis)
x1, y1 = xu.analysis.line_cuts.getindex(x[1], y[1], self.gridder.xaxis, self.gridder.yaxis)
num = int(np.hypot(x1-x0, y1-y0)) #number of points that will be plotted
xi, yi = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
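# A single intensity profile is interpolated along the cut; it is returned twice so that it can be
# plotted against both the Qx and the Qz coordinates of the line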
profiles_data_X = profiles_data_Y = scipy.ndimage.map_coordinates(self.gridder.data, np.vstack((xi,yi)))
coor_X_export,coor_Y_export = np.linspace(x[0], x[1], num), np.linspace(y[0], y[1], num)
#coor_X_export = np.sort(coor_X_export)
#coor_Y_export = np.sort(coor_Y_export)
return coor_X_export,coor_Y_export, profiles_data_X, profiles_data_Y
def boundary_rectangles(self, x, y):
"""
IN : x[0,1], y[0,1]: positions of the line cut (arbitrary direction)
OUT: ROI rectangle: the rectangle in which the data will be taken
Bound rectangle: the limit values for Qx, Qz line cuts (min, max)
"""
x = np.asarray(x)
y = np.asarray(y)
alpha = np.arctan(abs((y[1]-y[0])/(x[1]-x[0]))) # inclination angle of the ROI w.r.t. the horizontal axis; pay attention to the sign of alpha
#print np.degrees(alpha)
T = self.largueur_int/2.
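# If the cut is steeper than ~55 degrees the ROI is widened horizontally, otherwise vertically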
if np.degrees(alpha)>55.0:
inc_x = 1
inc_y = 0
else:
inc_x = 0
inc_y = 1
y1 = y + T*inc_y
y2 = y - T*inc_y
x1 = x + T*inc_x
x2 = x - T*inc_x
#These positions are in reciprocal space units. The boundary order will be: 1-2-2-1
roi_rect = [[y1[0],x1[0]],[y2[0],x2[0]],[y2[1],x2[1]],[y1[1],x1[1]],[y1[0],x1[0]]]
roi_rect = path.Path(roi_rect)
#***************** Get the corresponding index of these points ***************************
i1,j1 = xu.analysis.line_cuts.getindex(x1[0], y1[0], self.gridder.xaxis, self.gridder.yaxis)
i2,j2 = xu.analysis.line_cuts.getindex(x2[0], y2[0], self.gridder.xaxis, self.gridder.yaxis)
i3,j3 = xu.analysis.line_cuts.getindex(x2[1], y2[1], self.gridder.xaxis, self.gridder.yaxis)
i4,j4 = xu.analysis.line_cuts.getindex(x1[1], y1[1], self.gridder.xaxis, self.gridder.yaxis)
roi_box = [[j1,i1],[j2,i2],[j3,i3],[j4,i4],[j1,i1]]
roi_box = path.Path(roi_box)
#******* Calculate the limit boundary rectangle
y_tmp = np.vstack((y1, y2))
x_tmp = np.vstack((x1, x2))
y_min = y_tmp.min()
y_max = y_tmp.max()
x_min = x_tmp.min()
x_max = x_tmp.max()
bound_rect = [x_min, x_max, y_min, y_max]
bound_rect = np.asarray(bound_rect)
contours = roi_rect.vertices
p=self.ax.plot(contours[:,1], contours[:,0], linewidth=1.5, color='white')
self.polygons.append(p[0])
self.canvas.draw()
return roi_box, bound_rect
def extract_roi_data(self, roi_box, bound_rect):
#***** Extraction of the ROI defined by the ROI box ******************
qx_min = bound_rect[0]
qx_max = bound_rect[1]
qz_min = bound_rect[2]
qz_max = bound_rect[3]
#***** Getting index of the boundary points in order to calculate the length of the extracted array
ixmin, izmin = xu.analysis.line_cuts.getindex(qx_min, qz_min, self.gridder.xaxis, self.gridder.yaxis)
ixmax, izmax = xu.analysis.line_cuts.getindex(qx_max, qz_max, self.gridder.xaxis, self.gridder.yaxis)
x_steps = ixmax - ixmin +1
z_steps = izmax - izmin +1
qx_coor = np.linspace(qx_min, qx_max, x_steps)
qz_coor = np.linspace(qz_min, qz_max, z_steps)
ROI = np.zeros(shape=(x_steps))
#****** Extract Qx line cuts ************************
for zi in range(izmin, izmax+1):
qx_int = self.gridder.data[ixmin:ixmax+1,zi]
#****** if the point is inside the ROI box: point = 0
inpoints = []
for i in range(ixmin,ixmax+1):
inpoint= roi_box.contains_point([zi,i])
inpoints.append(inpoint)
for b in range(len(inpoints)):
if inpoints[b]==False:
qx_int[b] = 0
ROI = np.vstack((ROI, qx_int))
ROI = np.delete(ROI, 0, 0) #Delete the first line which contains zeros
#****** Sum them up! Return Qx, Qz projection zones and Qx,Qz intensity
qx_ROI = ROI.sum(axis=0)/ROI.shape[0]
qz_ROI = ROI.sum(axis=1)/ROI.shape[1]
return qx_coor, qx_ROI, qz_coor, qz_ROI
def plot_profiles(self, x, y, cross_line=True):
if cross_line:
"""Drawing lines where I want to plot profiles"""
# ******** if this is not an arbitrary profile, x and y are one-element lists: take the single clicked point
x=x[0]
y=y[0]
hline = self.ax.axhline(y, color='k', ls='--', lw=1)
self.lines.append(hline)
vline = self.ax.axvline(x, color='k', ls='--', lw=1)
self.lines.append(vline)
"""Getting data to be plotted"""
self.coor_X_export, self.profiles_data_X = xu.analysis.line_cuts.get_qx_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, y, qrange=self.largueur_int)
self.coor_Y_export, self.profiles_data_Y = xu.analysis.line_cuts.get_qz_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, x, qrange=self.largueur_int)
xc = x
yc = y
""" Fitting information """
ix,iy = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
ix_left,iy = xu.analysis.line_cuts.getindex(x-self.fitting_width, y, self.gridder.xaxis, self.gridder.yaxis)
qx_2_fit = self.coor_X_export[ix_left:ix*2-ix_left+1]
qx_int_2_fit = self.profiles_data_X[ix_left:2*ix-ix_left+1]
X_fitted_params, X_fitted_data = fit(qx_2_fit, qx_int_2_fit,xc, cross_line)
####################axX.plot(qx_2_fit, qx_fit_data, color='red',linewidth=2)
ix,iy_down = xu.analysis.line_cuts.getindex(x, y-self.fitting_width, self.gridder.xaxis, self.gridder.yaxis)
qz_2_fit = self.coor_Y_export[iy_down:iy*2-iy_down+1]
qz_int_2_fit = self.profiles_data_Y[iy_down:iy*2-iy_down+1]
Y_fitted_params, Y_fitted_data = fit(qz_2_fit, qz_int_2_fit,yc, cross_line)
####################axY.plot(qz_2_fit, qz_fit_data, color='red',linewidth=2)
else:
#**** extract arbitrary line cut
#**** extract one single line cut:
if not self.rectangle_profiles_btn.get_active():
self.coor_X_export, self.coor_Y_export, self.profiles_data_X, self.profiles_data_Y = self.arbitrary_line_cut(x,y)
else:
roi_box,bound_rect = self.boundary_rectangles(x,y)
self.coor_X_export, self.profiles_data_X, self.coor_Y_export, self.profiles_data_Y = self.extract_roi_data(roi_box, bound_rect)
tmpX = np.sort(self.coor_X_export)
tmpY = np.sort(self.coor_Y_export)
xc = tmpX[self.profiles_data_X.argmax()]
yc = tmpY[self.profiles_data_Y.argmax()]
""" Fitting information """
X_fitted_params, X_fitted_data = fit(self.coor_X_export, self.profiles_data_X, xc, not cross_line)
Y_fitted_params, Y_fitted_data = fit(self.coor_Y_export, self.profiles_data_Y, yc, not cross_line)
qx_2_fit = self.coor_X_export
qz_2_fit = self.coor_Y_export
""" Plotting profiles """
self.profiles_ax1.cla()
self.profiles_ax2.cla()
self.profiles_ax1.format_coord = self.pro_format_coord
self.profiles_ax2.format_coord = self.pro_format_coord
#self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
#self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
self.profiles_ax1.plot(self.coor_Y_export, self.profiles_data_Y, color='blue', lw=3)
self.profiles_ax1.plot(qz_2_fit, Y_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax2.plot(self.coor_X_export, self.profiles_data_X, color='blue', lw=3)
self.profiles_ax2.plot(qx_2_fit, X_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas.draw()
# Show the fitted results
self.Qz_fitted_y0.set_text("%.4f"%Y_fitted_params['y0'].value)
self.Qz_fitted_xc.set_text("%.4f"%Y_fitted_params['xc'].value)
self.Qz_fitted_A.set_text("%.4f"%Y_fitted_params['A'].value)
self.Qz_fitted_w.set_text("%.4f"%Y_fitted_params['w'].value)
self.Qz_fitted_mu.set_text("%.4f"%Y_fitted_params['mu'].value)
self.Qx_fitted_y0.set_text("%.4f"%X_fitted_params['y0'].value)
self.Qx_fitted_xc.set_text("%.4f"%X_fitted_params['xc'].value)
self.Qx_fitted_A.set_text("%.4f"%X_fitted_params['A'].value)
self.Qx_fitted_w.set_text("%.4f"%X_fitted_params['w'].value)
self.Qx_fitted_mu.set_text("%.4f"%X_fitted_params['mu'].value)
self.profiles_refresh()
self.canvas.draw()
def draw_pointed(self, x, y, finished=False):
#if len(self.lines)>0:
# self.clear_notes()
p=self.ax.plot(x,y,'ro')
self.points.append(p[0])
if finished:
l=self.ax.plot(self.arb_lines_X, self.arb_lines_Y, '--',linewidth=1.5, color='white')
self.lines.append(l[0])
self.canvas.draw()
def profiles_refresh(self):
""" """
if self.profiles_log_btn.get_active():
self.profiles_ax1.set_yscale('log')
self.profiles_ax2.set_yscale('log')
else:
self.profiles_ax1.set_yscale('linear')
self.profiles_ax2.set_yscale('linear')
self.profiles_canvas.draw()
#return
def profiles_update(self, widget):
self.profiles_refresh()
def profiles_export(self,widget):
""" Export X,Y profiles data in the same folder as the EDF image """
proX_fname = self.rsm.split(".")[0]+"_Qx_profile.dat"
proY_fname = self.rsm.split(".")[0]+"_Qz_profile.dat"
proX_export= np.vstack([self.coor_X_export, self.profiles_data_X])
proX_export=proX_export.T
proY_export= np.vstack([self.coor_Y_export, self.profiles_data_Y])
proY_export=proY_export.T
try:
np.savetxt(proX_fname, proX_export)
np.savetxt(proY_fname, proY_export)
self.popup_info('info','Data are successfully exported!')
except:
self.popup_info('error','ERROR! Data not exported!')
def on_press(self, event):
#******************** Plot X,Y cross profiles ***************************************************
if (event.inaxes == self.ax) and (event.button==3) and self.plotXYprofiles_btn.get_active():
x = event.xdata
y = event.ydata
xx=[]
yy=[]
xx.append(x)
yy.append(y)
self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
self.plot_profiles(xx,yy,cross_line=True)
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
#******************** Plot arbitrary profiles ***************************************************
elif (event.inaxes == self.ax) and (event.button==1) and (self.arbitrary_profiles_btn.get_active() or self.rectangle_profiles_btn.get_active()):
#self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
self.arb_line_points +=1
#print "Number of points clicked: ",self.arb_line_points
if self.arb_line_points>2:
self.clear_notes()
self.arb_line_points=1
x = event.xdata
y = event.ydata
self.arb_lines_X.append(x)
self.arb_lines_Y.append(y)
if len(self.arb_lines_X)<2:
finished=False
elif len(self.arb_lines_X)==2:
finished = True
self.draw_pointed(x,y,finished)#If finished clicking, connect the two points by a line
if finished:
self.plot_profiles(self.arb_lines_X, self.arb_lines_Y, cross_line=False)
self.arb_lines_X=[]
self.arb_lines_Y=[]
#self.canvas.draw()
#******************** Clear cross lines in the main image ****************************************
elif event.button==2:
self.clear_notes()
def profile_press(self, event):
""" Calculate thickness fringes """
if event.inaxes == self.profiles_ax1:
draw_fringes = True
ax = self.profiles_ax1
X_data = self.coor_Y_export
Y_data = self.profiles_data_Y
xlabel = r'$Q_z (nm^{-1})$'
title = "Linear regression of Qz fringes"
title_FFT = "Fast Fourier Transform of Qz profiles"
xlabel_FFT= "Period (nm)"
elif event.inaxes == self.profiles_ax2:
draw_fringes = True
ax = self.profiles_ax2
X_data = self.coor_X_export
Y_data = self.profiles_data_X
xlabel = r'$Q_x (nm^{-1})$'
title = "Linear regression of Qx fringes"
title_FFT = "Fast Fourier Transform of Qx profiles"
xlabel_FFT= "Period (nm)"
else:
draw_fringes = False
if draw_fringes and (event.button==1):
if len(self.profiles_fringes)>0:
self.profiles_fringes = np.asarray(self.profiles_fringes)
self.profiles_fringes = np.sort(self.profiles_fringes)
fringes_popup = PopUpFringes(self.profiles_fringes, xlabel, "Fringes order", title)
self.profiles_fringes=[]
self.clear_notes()
elif draw_fringes and (event.button == 3):
vline=ax.axvline(event.xdata, linewidth=2, color="green")
self.lines.append(vline)
self.profiles_fringes.append(event.xdata)
elif draw_fringes and event.button == 2:
XF,YF = Fourier(X_data, Y_data)
popup_window=PopUpImage(XF, YF, xlabel_FFT, "Normalized intensity", title_FFT)
self.profiles_canvas.draw()
#plt.clf()
def clear_notes(self):
"""
print "Number of notes: ",len(self.my_notes)
print "Number of lines: ",len(self.lines)
print "Number of points: ",len(self.points)
print "Number of polygons: ",len(self.polygons)
"""
if len(self.my_notes)>0:
for txt in self.my_notes:
txt.remove()
if len(self.lines)>0:
for line in self.lines:
line.remove()
if len(self.points)>0:
for p in self.points:
p.remove()
if len(self.polygons)>0:
for p in self.polygons:
p.remove()
self.canvas.draw()
self.my_notes = []
#self.profiles_notes = []
self.lines=[]
self.points=[]
self.polygons=[]
self.arb_lines_X=[]
self.arb_lines_Y=[]
self.arb_line_points = 0
def on_motion(self,event):
print "Mouse moved !"
if event.inaxes == self.ax and self.arbitrary_profiles_btn.get_active() and self.arb_line_points==1:
x = event.xdata
y = event.ydata
self.clear_notes()
line = self.ax.plot([self.arb_lines_X[0], x], [self.arb_lines_Y[0],y], 'ro-')
self.lines.append(line[0])
self.canvas.draw()
def on_release(self, event):
if event.inaxes == self.ax:
if self.mouse_moved==True:
self.mouse_moved = False
def popup_info(self,info_type,text):
""" info_type = WARNING, INFO, QUESTION, ERROR """
if info_type.upper() == "WARNING":
mess_type = gtk.MESSAGE_WARNING
elif info_type.upper() == "INFO":
mess_type = gtk.MESSAGE_INFO
elif info_type.upper() == "ERROR":
mess_type = gtk.MESSAGE_ERROR
elif info_type.upper() == "QUESTION":
mess_type = gtk.MESSAGE_QUESTION
self.warning=gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, mess_type, gtk.BUTTONS_CLOSE,text)
self.warning.run()
self.warning.destroy()
#********************************************************************
# Functions for the Spec-HDF5 data conversion
#********************************************************************
def select_file(self,widget,path,label):
dialog = gtk.FileChooserDialog("Select file",None,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response = dialog.run()
if response == gtk.RESPONSE_OK:
file_choosen = dialog.get_filename()
path.set_text(file_choosen)
self.DATA_current_folder = os.path.dirname(file_choosen)
if label == "A":
self.attenuation_file = file_choosen.decode('utf8')
elif label == "S":
self.spec_file = file_choosen.decode('utf8')
elif label == "M":
self.mca_file = file_choosen.decode('utf8')
else:
pass
dialog.destroy()
def select_folder(self, widget, path, label):
dialog = gtk.FileChooserDialog(title="Select folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
path.set_text(folder)
self.DATA_current_folder = folder.decode('utf8')
if label == "D":
self.des_folder = folder.decode('utf8')
else:
pass
dialog.destroy()
def HKL2Q(self,H,K,L,a):
""" Q// est dans la direction [110], Qz // [001]"""
Qx = H*np.sqrt(2.)/a
Qy = K*np.sqrt(2.)/a
Qz = L/a
return [Qx, Qy, Qz]
def loadAmap(self,scanid,specfile,mapData,retard):
try:
psdSize = float(self.t1_entry.get_text())
Nchannels = int(self.t2_entry.get_text())
psdMin = int(self.t5_entry1.get_text())
psdMax = int(self.t5_entry2.get_text())
psd0 = float(self.t3_entry.get_text())
pixelSize = psdSize/Nchannels
pixelPerDeg = float(self.t4_entry.get_text())
distance = pixelSize * pixelPerDeg / np.tan(np.radians(1.0)) # sample-detector distance in mm
psdor = self.t6_entry.get_active() #psd orientation (up, down, in, out)
if psdor == 0:
psdor = 'z+'
elif psdor == 1:
psdor = 'z-'
else:
psdor = 'unknown'
energy = float(self.e2_entry.get_text())
filter_data = self.attenuation_file
monitor_col = self.e5_entry.get_text()
foil_col = self.e4_entry.get_text()
monitor_ref = float(self.e6_entry.get_text())
#****************** Calculation ************************
headers, scan_kappa = SP.ReadSpec(specfile,scanid)
Eta = scan_kappa['Eta']
print Eta.shape
tth = headers['P'][0]
omega = headers['P'][1]
tth = float(tth)
omega = float(omega)
print "Del: %.2f, Eta: %.2f"%(tth,omega)
#Si = xu.materials.Si
hxrd = xu.HXRD(self.substrate.Q(self.in_plane), self.substrate.Q(self.out_of_plane), en = energy)
hxrd.Ang2Q.init_linear(psdor,psd0, Nchannels, distance=distance, pixelwidth=pixelSize, chpdeg=pixelPerDeg)
HKL = hxrd.Ang2HKL(omega, tth)
HKL = np.asarray(HKL)
HKL = HKL.astype(int)
print "HKL = ",HKL
H=K=L=np.zeros(shape=(0,Nchannels))
for i in range(len(Eta)):
om=Eta[i]
q=hxrd.Ang2HKL(om,tth,mat=self.substrate,dettype='linear')
H = np.vstack((H,q[0]))
K = np.vstack((K,q[1]))
L = np.vstack((L,q[2]))
filtre_foil = scan_kappa[foil_col]
filtre = filtre_foil.copy()
monitor= scan_kappa[monitor_col]
foil_data = np.loadtxt(filter_data)
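#****** Replace each foil index recorded in the scan by its attenuation coefficient read from the file ******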
for f in xrange(foil_data.shape[0]):
coef = filtre_foil == f
filtre[coef] = foil_data[f,1]
#print filtre
mapData = mapData + 1e-6
if retard:
for i in range(len(filtre)-1):
mapData[i+1] = mapData[i+1]*filtre[i]
else:
for i in range(len(filtre)):
mapData[i] = mapData[i]*filtre[i]
for i in range(len(monitor)):
mapData[i] = mapData[i]*monitor_ref/monitor[i]
mapData = mapData[:,psdMin:psdMax]
H = H[:,psdMin:psdMax]
K = K[:,psdMin:psdMax]
L = L[:,psdMin:psdMax]
########## Offset correction ###############
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = HKL[0] - H_sub
K_offset = HKL[1] - K_sub
L_offset = HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
Q = self.HKL2Q(H, K, L, a)
return Q,mapData
except:
self.popup_info("warning", "Please make sure that you have correctly entered the all parameters.")
return None,None
def gtk_waiting(self):
while gtk.events_pending():
gtk.main_iteration()
def Change_Lab_Instrument(self, widget):
self.choosen_instrument = self.Instrument.get_active_text()
print "I choose ",self.choosen_instrument
if self.choosen_instrument == "Bruker":
self.XRDML_xrdml_file_txt.set_text("Select RAW file: ")
self.XRDML_xrdml_file_browse.set_label("Browse RAW file")
elif self.choosen_instrument == "PANalytical":
self.XRDML_xrdml_file_txt.set_text("Select XRDML file: ")
self.XRDML_xrdml_file_browse.set_label("Browse XRDML file")
def Convert_Lab_Source(self, widget):
print "Instrument chosen: ",self.choosen_instrument
energy = self.XRDML_energy.get_text()
if energy == "":
energy = 8048
else:
energy = float(energy)
self.lam = xu.lam2en(energy)/10
HKL = self.XRDML_reflection.get_text()
if HKL == "":
self.offset_correction = False
else:
self.offset_correction = True
HKL = HKL.split()
HKL = np.asarray([int(i) for i in HKL])
self.HKL = HKL
substrate = self.XRDML_substrate.get_active_text()
if substrate == "-- other":
substrate = self.XRDML_substrate_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
in_plane = self.XRDML_substrate_inplane.get_text()
out_of_plane = self.XRDML_substrate_outplane.get_text()
if in_plane != "" and out_of_plane != "":
in_plane = in_plane.split()
self.in_plane = np.asarray([int(i) for i in in_plane])
out_of_plane = out_of_plane.split()
self.out_of_plane = np.asarray([int(i) for i in out_of_plane])
self.has_orientation_matrix = True
self.experiment = xu.HXRD(self.substrate.Q(self.in_plane),self.substrate.Q(self.out_of_plane), en=energy)
else:
self.has_orientation_matrix = False
self.experiment = xu.HXRD(self.substrate.Q(1,1,0),self.substrate.Q(0,0,1), en=energy)
if self.choosen_instrument == "Bruker":
self.Bruker2HDF()
elif self.choosen_instrument == "PANalytical":
self.XRDML2HDF()
def XRDML2HDF(self):
try:
xrdml_file = self.spec_file
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading XRDML data ...")
self.gtk_waiting()
dataFile = xu.io.XRDMLFile(xrdml_file)
scan = dataFile.scan
omega_exp = scan['Omega']
tth_exp = scan['2Theta']
data = scan['detector']
if self.has_orientation_matrix:
omega,tth,psd = xu.io.getxrdml_map(xrdml_file)
[qx,qy,qz] = self.experiment.Ang2Q(omega, tth)
mapData = psd.reshape(data.shape)
H = qy.reshape(data.shape)
K = qy.reshape(data.shape)
L = qz.reshape(data.shape)
else:
mapData = data
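# Without an orientation matrix, compute Q directly from the coplanar geometry:
# psi is the tilt of the scattering vector with respect to the surface normal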
psi = omega_exp - tth_exp/2.
Qmod= 2.*np.sin(np.radians(tth_exp/2.))/self.lam
Qx = Qmod * np.sin(np.radians(psi))
Qz = Qmod * np.cos(np.radians(psi))
H=K = Qx*a/np.sqrt(2.0)
L = Qz*a
########## Offset correction ###############
if self.offset_correction:
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = self.HKL[0] - H_sub
K_offset = self.HKL[1] - K_sub
L_offset = self.HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
Q = self.HKL2Q(H, K, L, a)
self.XRDML_show_info.set_text("XRDML data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "XRDML_Map"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
del_file = "rm -f %s"%h5file
os.system(del_file)
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=mapData, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Bruker2HDF(self):
try:
raw_file = self.spec_file
from MCA_GUI.Bruker import convert_raw_to_uxd,get_Bruker
uxd_file = raw_file.split(".")[0]+".uxd"
convert_raw_to_uxd(raw_file, uxd_file)
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading Raw data ...")
self.gtk_waiting()
a = self.substrate._geta1()[0] #in Angstrom
a = a/10. # convert the lattice parameter to nm
dataset = get_Bruker(uxd_file)
theta = dataset['omega']
dTheta = dataset['tth']
Qhkl = self.experiment.Ang2HKL(theta, dTheta)
Qx,Qy,Qz = Qhkl[0],Qhkl[1],Qhkl[2]
########## Offset correction ###############
if self.offset_correction:
x,y=np.unravel_index(np.argmax(dataset['data']),dataset['data'].shape)
Hsub = Qhkl[0][x,y]
Ksub = Qhkl[1][x,y]
Lsub = Qhkl[2][x,y]
Qx = Qhkl[0]+self.HKL[0]-Hsub
Qy = Qhkl[1]+self.HKL[1]-Ksub
Qz = Qhkl[2]+self.HKL[2]-Lsub
Q = self.HKL2Q(Qx, Qy, Qz, a)
self.XRDML_show_info.set_text("Raw data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "RSM"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
del_file = "rm -f %s"%h5file
os.system(del_file)
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=dataset['data'], compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def spec2HDF(self,widget):
try:
specfile = self.spec_file
mcafile = self.mca_file
scan_beg = int(self.c4_entry1.get_text())
scan_end = int(self.c4_entry2.get_text())
substrate = self.e1_entry.get_active_text()
if substrate == "-- other":
substrate = self.e1_entry_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
scanid = range(scan_beg, scan_end+1)
self.show_info.set_text("Reading MCA data ...")
self.gtk_waiting()
allMaps = SP.ReadMCA2D_complete(mcafile)
description = self.c5_entry1.get_text()
retard = self.c6_entry.get_active()
total = len(allMaps)
total_maps_loaded = "Number of map(s) loaded: %d"%total
self.show_info.set_text(total_maps_loaded)
self.gtk_waiting()
if description == "":
no_description = True
else:
description = description.split(",")
no_description = False
for i in range(len(allMaps)):
scannumber = scanid[i]
scan_name = "Scan_%d"%scannumber
if no_description:
h5file = scan_name+".h5"
d = scan_name
else:
h5file = description[i].strip()+".h5"
d = description[i].strip()
info = "\nSaving file N# %d/%d: %s"%(i+1,total,h5file)
out_info = total_maps_loaded + info
self.show_info.set_text(out_info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
del_file = "rm -f %s"%h5file
os.system(del_file)
h5file = h5.File(h5file,"w")
Q,mapdata = self.loadAmap(scannumber, specfile, allMaps[i], retard)
s = h5file.create_group(scan_name)
s.create_dataset('intensity', data=mapdata, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=d)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Export_HQ_Image(self, widget):
dialog = gtk.FileChooserDialog(title="Save image", action=gtk.FILE_CHOOSER_ACTION_SAVE, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
filename = self.rsm_choosen.split(".")[0] if self.rsm_choosen != "" else "Img"
dialog.set_current_name(filename+".png")
#dialog.set_filename(filename)
dialog.set_current_folder(self.GUI_current_folder)
filtre = gtk.FileFilter()
filtre.set_name("images")
filtre.add_pattern("*.png")
filtre.add_pattern("*.jpg")
filtre.add_pattern("*.pdf")
filtre.add_pattern("*.ps")
filtre.add_pattern("*.eps")
dialog.add_filter(filtre)
filtre = gtk.FileFilter()
filtre.set_name("Other")
filtre.add_pattern("*")
dialog.add_filter(filtre)
response = dialog.run()
if response==gtk.RESPONSE_OK:
#self.fig.savefig(dialog.get_filename())
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
fig = plt.figure(figsize=(10,8),dpi=100)
ax = fig.add_axes([0.12,0.2,0.7,0.7])
cax = fig.add_axes([0.85,0.2,0.03,0.7])
clabel = r'$Intensity\ (Counts\ per\ second)$'
fmt = "%d"
if self.linear_scale_btn.get_active():
clabel = r'$Log_{10}\ (Intensity)\ [arb.\ units]$'
fmt = "%.2f"
data = self.gridder.data.T
data = flat_data(data, self.vmin, self.vmax, self.linear_scale_btn.get_active())
img = ax.contourf(self.gridder.xaxis, self.gridder.yaxis, data, 100, vmin=self.vmin*1.1, vmax=self.vmax)
cb = fig.colorbar(img,cax=cax, format=fmt)
cb.set_label(clabel, fontsize=20)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.yaxis.label.set_size(20)
ax.xaxis.label.set_size(20)
ax.set_title(self.rsm_description,fontsize=20)
fig.savefig(dialog.get_filename())
plt.close()
dialog.destroy()
if __name__=="__main__":
MyMainWindow()
gtk.main()
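# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the GUI above; the function and argument
# names are hypothetical). It condenses the angle-to-HKL conversion performed
# in XRDML2HDF when no orientation matrix is given, plus the optional offset
# correction that shifts the map maximum onto the nominal substrate
# reflection. Wavelength lam and lattice parameter a are expected in the same
# length unit (nm in the GUI above).
def _demo_angles_to_HKL(omega, tth, lam, a, nominal_HKL=None, intensity=None):
    import numpy as np
    psi = omega - tth/2. # deviation from the symmetric condition (degrees)
    Qmod = 2.*np.sin(np.radians(tth/2.))/lam # |Q| without the 2*pi convention
    Qx = Qmod*np.sin(np.radians(psi))
    Qz = Qmod*np.cos(np.radians(psi))
    H = K = Qx*a/np.sqrt(2.0) # in-plane index along [110]
    L = Qz*a # out-of-plane index along [001]
    if nominal_HKL is not None and intensity is not None:
        # shift so that the most intense pixel sits on the nominal reflection
        i = np.argmax(intensity)
        H = H + (nominal_HKL[0] - np.ravel(H)[i])
        K = K + (nominal_HKL[1] - np.ravel(K)[i])
        L = L + (nominal_HKL[2] - np.ravel(L)[i])
    return H, K, L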
|
gpl-2.0
|
equialgo/scikit-learn
|
sklearn/pipeline.py
|
5
|
28682
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# License: BSD
from collections import defaultdict
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
__all__ = ['Pipeline', 'FeatureUnion']
class _BasePipeline(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Handles parameter management for classifiers composed of named steps.
"""
@abstractmethod
def __init__(self):
pass
def _replace_step(self, steps_attr, name, new_val):
# assumes `name` is a valid step name
new_steps = getattr(self, steps_attr)[:]
for i, (step_name, _) in enumerate(new_steps):
if step_name == name:
new_steps[i] = (name, new_val)
break
setattr(self, steps_attr, new_steps)
def _get_params(self, steps_attr, deep=True):
out = super(_BasePipeline, self).get_params(deep=False)
if not deep:
return out
steps = getattr(self, steps_attr)
out.update(steps)
for name, estimator in steps:
if estimator is None:
continue
for key, value in six.iteritems(estimator.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _set_params(self, steps_attr, **params):
# Ensure strict ordering of parameter setting:
# 1. All steps
if steps_attr in params:
setattr(self, steps_attr, params.pop(steps_attr))
# 2. Step replacement
step_names, _ = zip(*getattr(self, steps_attr))
for name in list(six.iterkeys(params)):
if '__' not in name and name in step_names:
self._replace_step(steps_attr, name, params.pop(name))
# 3. Step parameters and other initialisation arguments
super(_BasePipeline, self).set_params(**params)
return self
def _validate_names(self, names):
if len(set(names)) != len(names):
raise ValueError('Names provided are not unique: '
'{0!r}'.format(list(names)))
invalid_names = set(names).intersection(self.get_params(deep=False))
if invalid_names:
raise ValueError('Step names conflict with constructor arguments: '
'{0!r}'.format(sorted(invalid_names)))
invalid_names = [name for name in names if '__' in name]
if invalid_names:
raise ValueError('Step names must not contain __: got '
'{0!r}'.format(invalid_names))
class Pipeline(_BasePipeline):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
A step's estimator may be replaced entirely by setting the parameter
with its name to another estimator, or a transformer removed by setting
to None.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.829...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([False, False, True, True, False, False, True, True, False,
True, False, True, True, False, True, False, True, True,
False, False], dtype=bool)
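>>> # As noted above, an intermediate step can also be disabled by
>>> # setting it to None (illustrative; the repr is elided)
>>> anova_svm.set_params(anova=None) # doctest: +ELLIPSIS
Pipeline(steps=[...])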
"""
# BaseEstimator interface
def __init__(self, steps):
# shallow copy of steps
self.steps = tosequence(steps)
self._validate_steps()
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params('steps', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self
"""
self._set_params('steps', **kwargs)
return self
def _validate_steps(self):
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None:
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps should be "
"transformers and implement fit and transform."
" '%s' (type %s) doesn't" % (t, type(t)))
# We allow last estimator to be None as an identity transformation
if estimator is not None and not hasattr(estimator, "fit"):
raise TypeError("Last step of Pipeline should implement fit. "
"'%s' (type %s) doesn't"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _fit(self, X, y=None, **fit_params):
self._validate_steps()
fit_params_steps = dict((name, {}) for name, step in self.steps
if step is not None)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if transform is None:
pass
elif hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
if self._final_estimator is None:
return Xt, {}
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit the model
Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
self : Pipeline
This estimator
"""
Xt, fit_params = self._fit(X, y, **fit_params)
if self._final_estimator is not None:
self._final_estimator.fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit the model and transform with the final estimator
Fits all the transforms one after the other and transforms the
data, then uses fit_transform on transformed data with the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
Xt : array-like, shape = [n_samples, n_transformed_features]
Transformed samples
"""
last_step = self._final_estimator
Xt, fit_params = self._fit(X, y, **fit_params)
if hasattr(last_step, 'fit_transform'):
return last_step.fit_transform(Xt, y, **fit_params)
elif last_step is None:
return Xt
else:
return last_step.fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Apply transforms to the data, and predict with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_pred : array-like
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of each step, where
each parameter name is prefixed such that parameter ``p`` for step
``s`` has key ``s__p``.
Returns
-------
y_pred : array-like
"""
Xt, fit_params = self._fit(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Apply transforms, and predict_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_proba : array-like, shape = [n_samples, n_classes]
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Apply transforms, and decision_function of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Apply transforms, and predict_log_proba of the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : array-like, shape = [n_samples, n_classes]
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@property
def transform(self):
"""Apply transforms, and transform with the final estimator
This also works where final estimator is ``None``: all prior
transformations are applied.
Parameters
----------
X : iterable
Data to transform. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
Xt : array-like, shape = [n_samples, n_transformed_features]
"""
# _final_estimator is None or has transform, otherwise attribute error
if self._final_estimator is not None:
self._final_estimator.transform
return self._transform
def _transform(self, X):
Xt = X
for name, transform in self.steps:
if transform is not None:
Xt = transform.transform(Xt)
return Xt
@property
def inverse_transform(self):
"""Apply inverse transformations in reverse order
All estimators in the pipeline must support ``inverse_transform``.
Parameters
----------
Xt : array-like, shape = [n_samples, n_transformed_features]
Data samples, where ``n_samples`` is the number of samples and
``n_features`` is the number of features. Must fulfill
input requirements of last step of pipeline's
``inverse_transform`` method.
Returns
-------
Xt : array-like, shape = [n_samples, n_features]
"""
# raise AttributeError if necessary for hasattr behaviour
for name, transform in self.steps:
if transform is not None:
transform.inverse_transform
return self._inverse_transform
def _inverse_transform(self, X):
Xt = X
for name, transform in self.steps[::-1]:
if transform is not None:
Xt = transform.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None, sample_weight=None):
"""Apply transforms, and score with the final estimator
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
sample_weight : array-like, default=None
If not None, this argument is passed as ``sample_weight`` keyword
argument to the ``score`` method of the final estimator.
Returns
-------
score : float
"""
Xt = X
for name, transform in self.steps[:-1]:
if transform is not None:
Xt = transform.transform(Xt)
score_params = {}
if sample_weight is not None:
score_params['sample_weight'] = sample_weight
return self.steps[-1][-1].score(Xt, y, **score_params)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
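# Illustrative sketch (hypothetical helper, not part of the module API): it
# shows the behaviour of _name_estimators, where duplicated estimator types
# get a "-<n>" suffix while unique types keep the bare lowercased class name.
def _demo_name_estimators():
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA
    named = _name_estimators([StandardScaler(), StandardScaler(), PCA()])
    # named == [('standardscaler-1', ...), ('standardscaler-2', ...), ('pca', ...)]
    return named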
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
... # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB(priors=None))])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, weight, X):
res = transformer.transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(transformer, name, weight, X, y,
**fit_params):
if hasattr(transformer, 'fit_transform'):
res = transformer.fit_transform(X, y, **fit_params)
else:
res = transformer.fit(X, y, **fit_params).transform(X)
# if we have a weight for this transformer, multiply output
if weight is None:
return res, transformer
return res * weight, transformer
class FeatureUnion(_BasePipeline, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Parameters of the transformers may be set using its name and the parameter
name separated by a '__'. A transformer may be replaced entirely by
setting the parameter with its name to another transformer,
or removed by setting to ``None``.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list : list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
transformer_weights : dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = tosequence(transformer_list)
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
self._validate_transformers()
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params('transformer_list', deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``.
Returns
-------
self
"""
self._set_params('transformer_list', **kwargs)
return self
def _validate_transformers(self):
names, transformers = zip(*self.transformer_list)
# validate names
self._validate_names(names)
# validate estimators
for t in transformers:
if t is None:
continue
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All estimators should implement fit and "
"transform. '%s' (type %s) doesn't" %
(t, type(t)))
def _iter(self):
"""Generate (name, est, weight) tuples excluding None transformers
"""
get_weight = (self.transformer_weights or {}).get
return ((name, trans, get_weight(name))
for name, trans in self.transformer_list
if trans is not None)
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans, weight in self._iter():
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s (type %s) does not "
"provide get_feature_names."
% (str(name), type(trans).__name__))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data, used to fit transformers.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
self : FeatureUnion
This estimator
"""
self._validate_transformers()
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for _, trans, _ in self._iter())
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
self._validate_transformers()
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, weight, X, y,
**fit_params)
for name, trans, weight in self._iter())
if not result:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : iterable or array-like, depending on transformers
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, weight, X)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def _update_transformer_list(self, transformers):
transformers = iter(transformers)
self.transformer_list[:] = [
(name, None if old is None else next(transformers))
for name, old in self.transformer_list
]
def make_union(*transformers, **kwargs):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Parameters
----------
*transformers : list of estimators
n_jobs : int, optional
Number of jobs to run in parallel (default 1).
Returns
-------
f : FeatureUnion
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca',
PCA(copy=True, iterated_power='auto',
n_components=None, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
"""
n_jobs = kwargs.pop('n_jobs', 1)
if kwargs:
# We do not currently support `transformer_weights` as we may want to
# change its type spec in make_union
raise TypeError('Unknown keyword arguments: "{}"'
.format(list(kwargs.keys())[0]))
return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs)
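# ---------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical function, not part of this module).
# It exercises two features described in the docstrings above that have no
# doctest: routing fit parameters with the "<step>__<param>" convention and
# weighting the outputs of a FeatureUnion. The estimator choices are
# arbitrary.
def _demo_union_and_fit_params():
    import numpy as np
    from sklearn.decomposition import PCA, TruncatedSVD
    from sklearn.svm import SVC
    X = np.random.RandomState(0).rand(20, 5)
    y = np.arange(20) % 2
    # the PCA block is weighted twice as strongly as the TruncatedSVD block
    union = FeatureUnion([('pca', PCA(n_components=2)),
                          ('svd', TruncatedSVD(n_components=2))],
                         transformer_weights={'pca': 2.0, 'svd': 1.0})
    pipe = Pipeline([('features', union), ('svc', SVC())])
    # sample_weight reaches SVC.fit through the "svc__" prefix
    pipe.fit(X, y, svc__sample_weight=np.ones(len(y)))
    return pipe.predict(X)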
|
bsd-3-clause
|
ClimbsRocks/auto_ml
|
tests/advanced_tests/classifiers.py
|
1
|
15862
|
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
import numpy as np
import pandas as pd
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
# We just want to make sure these run, not necessarily make sure that they're super accurate (which takes more time, and is dataset dependent)
df_titanic_train = df_titanic_train.sample(frac=0.5)
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
# Small sample sizes mean there's a fair bit of noise here
lower_bound = -0.18
if model_name == 'DeepLearningClassifier':
lower_bound = -0.255
if model_name == 'LGBMClassifier':
lower_bound = -0.221
if model_name == 'GradientBoostingClassifier':
lower_bound = -0.225
if model_name == 'CatBoostClassifier':
lower_bound = -0.221
assert lower_bound < test_score < -0.135
def categorical_ensembling_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(df_titanic_train, model_names=model_name, categorical_column='embarked')
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
lower_bound = -0.18
upper_bound = -0.145
if model_name == 'DeepLearningClassifier':
lower_bound = -0.215
# CatBoost is super inconsistent
if model_name == 'CatBoostClassifier':
upper_bound = -0.137
assert lower_bound < test_score < upper_bound
def getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.18
upper_bound = -0.135
if model_name == 'DeepLearningClassifier':
lower_bound = -0.195
if model_name == 'CatBoostClassifier':
lower_bound = -0.215
upper_bound = -0.128
assert lower_bound < first_score < upper_bound
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < upper_bound
def getting_single_predictions_multilabel_classification(model_name=None):
# auto_ml does not support multilabel classification for deep learning at the moment
if model_name == 'DeepLearningClassifier' or model_name == 'CatBoostClassifier':
return
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
column_descriptions = {
'airline_sentiment': 'output'
, 'airline': 'categorical'
, 'text': 'ignore'
, 'tweet_location': 'categorical'
, 'user_timezone': 'categorical'
, 'tweet_created': 'date'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_twitter_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_twitter_test_dictionaries = df_twitter_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
first_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = 0.67
# LGBM is super finicky here - sometimes it's fine, but sometimes it does pretty terribly.
if model_name == 'LGBMClassifier':
lower_bound = 0.6
assert lower_bound < first_score < 0.79
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_twitter_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_twitter_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('df_twitter_test_dictionaries')
print(df_twitter_test_dictionaries)
second_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < 0.79
def feature_learning_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
# NOTE: it is bad practice to pass in our training set as the fl_data set, but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train(df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.16
if model_name == 'DeepLearningClassifier':
lower_bound = -0.187
assert lower_bound < first_score < -0.133
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.133
def feature_learning_categorical_ensembling_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
# NOTE: it is bad practice to pass in our training set as the fl_data set, but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train_categorical_ensemble(df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data, categorical_column='embarked')
file_name = ml_predictor.save(str(random.random()))
from auto_ml.utils_models import load_ml_model
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.17
if model_name == 'DeepLearningClassifier':
lower_bound = -0.245
if model_name == 'CatBoostClassifier':
lower_bound = -0.265
assert lower_bound < first_score < -0.147
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.147
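# Note (assumption, for clarity only): the score bounds above are negative
# because utils.calculate_brier_score_loss appears to return the negated
# Brier loss so that "higher is better". A minimal stand-in could look like
# the hypothetical helper below; it is not the actual utils_testing code.
def _negated_brier_score(y_true, predicted_probabilities):
    from sklearn.metrics import brier_score_loss
    return -1 * brier_score_loss(y_true, predicted_probabilities)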
|
mit
|
dmlc/xgboost
|
tests/python-gpu/test_gpu_prediction.py
|
1
|
17716
|
import sys
import pytest
import numpy as np
import xgboost as xgb
from xgboost.compat import PANDAS_INSTALLED
from hypothesis import given, strategies, assume, settings
if PANDAS_INSTALLED:
from hypothesis.extra.pandas import column, data_frames, range_indexes
else:
def noop(*args, **kwargs):
pass
column, data_frames, range_indexes = noop, noop, noop
sys.path.append("tests/python")
import testing as tm
from test_predict import run_threaded_predict # noqa
from test_predict import run_predict_leaf # noqa
rng = np.random.RandomState(1994)
shap_parameter_strategy = strategies.fixed_dictionaries({
'max_depth': strategies.integers(0, 11),
'max_leaves': strategies.integers(0, 256),
'num_parallel_tree': strategies.sampled_from([1, 10]),
}).filter(lambda x: x['max_depth'] > 0 or x['max_leaves'] > 0)
predict_parameter_strategy = strategies.fixed_dictionaries({
'max_depth': strategies.integers(1, 8),
'num_parallel_tree': strategies.sampled_from([1, 4]),
})
class TestGPUPredict:
def test_predict(self):
iterations = 10
np.random.seed(1)
test_num_rows = [10, 1000, 5000]
test_num_cols = [10, 50, 500]
# This test passes for tree_method=gpu_hist and tree_method=exact, but
# for `hist` and `approx` the floating point error accumulates faster
# and the comparison fails even when tol is set to 1e-4. For `hist`, the
# mismatching rate with 5000 rows is 0.04.
for num_rows in test_num_rows:
for num_cols in test_num_cols:
dtrain = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
dval = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
dtest = xgb.DMatrix(np.random.randn(num_rows, num_cols),
label=[0, 1] * int(num_rows / 2))
watchlist = [(dtrain, 'train'), (dval, 'validation')]
res = {}
param = {
"objective": "binary:logistic",
"predictor": "gpu_predictor",
'eval_metric': 'logloss',
'tree_method': 'gpu_hist',
'max_depth': 1
}
bst = xgb.train(param, dtrain, iterations, evals=watchlist,
evals_result=res)
assert self.non_increasing(res["train"]["logloss"])
gpu_pred_train = bst.predict(dtrain, output_margin=True)
gpu_pred_test = bst.predict(dtest, output_margin=True)
gpu_pred_val = bst.predict(dval, output_margin=True)
param["predictor"] = "cpu_predictor"
bst_cpu = xgb.train(param, dtrain, iterations, evals=watchlist)
cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True)
cpu_pred_test = bst_cpu.predict(dtest, output_margin=True)
cpu_pred_val = bst_cpu.predict(dval, output_margin=True)
np.testing.assert_allclose(cpu_pred_train, gpu_pred_train,
rtol=1e-6)
np.testing.assert_allclose(cpu_pred_val, gpu_pred_val,
rtol=1e-6)
np.testing.assert_allclose(cpu_pred_test, gpu_pred_test,
rtol=1e-6)
def non_increasing(self, L):
return all((y - x) < 0.001 for x, y in zip(L, L[1:]))
# Test case for a bug where multiple batch predictions made on a
# test set produce incorrect results
@pytest.mark.skipif(**tm.no_sklearn())
def test_multi_predict(self):
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
n = 1000
X, y = make_regression(n, random_state=rng)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=123)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)
params = {}
params["tree_method"] = "gpu_hist"
params['predictor'] = "gpu_predictor"
bst_gpu_predict = xgb.train(params, dtrain)
params['predictor'] = "cpu_predictor"
bst_cpu_predict = xgb.train(params, dtrain)
predict0 = bst_gpu_predict.predict(dtest)
predict1 = bst_gpu_predict.predict(dtest)
cpu_predict = bst_cpu_predict.predict(dtest)
assert np.allclose(predict0, predict1)
assert np.allclose(predict0, cpu_predict)
@pytest.mark.skipif(**tm.no_sklearn())
def test_sklearn(self):
m, n = 15000, 14
tr_size = 2500
X = np.random.rand(m, n)
y = 200 * np.matmul(X, np.arange(-3, -3 + n))
X_train, y_train = X[:tr_size, :], y[:tr_size]
X_test, y_test = X[tr_size:, :], y[tr_size:]
# First with cpu_predictor
params = {'tree_method': 'gpu_hist',
'predictor': 'cpu_predictor',
'n_jobs': -1,
'seed': 123}
m = xgb.XGBRegressor(**params).fit(X_train, y_train)
cpu_train_score = m.score(X_train, y_train)
cpu_test_score = m.score(X_test, y_test)
# Now with gpu_predictor
params['predictor'] = 'gpu_predictor'
m = xgb.XGBRegressor(**params).fit(X_train, y_train)
gpu_train_score = m.score(X_train, y_train)
gpu_test_score = m.score(X_test, y_test)
assert np.allclose(cpu_train_score, gpu_train_score)
assert np.allclose(cpu_test_score, gpu_test_score)
def run_inplace_base_margin(self, booster, dtrain, X, base_margin):
import cupy as cp
dtrain.set_info(base_margin=base_margin)
from_inplace = booster.inplace_predict(data=X, base_margin=base_margin)
from_dmatrix = booster.predict(dtrain)
cp.testing.assert_allclose(from_inplace, from_dmatrix)
@pytest.mark.skipif(**tm.no_cupy())
def test_inplace_predict_cupy(self):
import cupy as cp
cp.cuda.runtime.setDevice(0)
rows = 1000
cols = 10
missing = 11 # set to integer for testing
cp_rng = cp.random.RandomState(1994)
cp.random.set_random_state(cp_rng)
X = cp.random.randn(rows, cols)
missing_idx = [i for i in range(0, cols, 4)]
X[:, missing_idx] = missing # set to be missing
y = cp.random.randn(rows)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=10)
test = xgb.DMatrix(X[:10, ...], missing=missing)
predt_from_array = booster.inplace_predict(X[:10, ...], missing=missing)
predt_from_dmatrix = booster.predict(test)
cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)
def predict_dense(x):
inplace_predt = booster.inplace_predict(x)
d = xgb.DMatrix(x)
copied_predt = cp.array(booster.predict(d))
return cp.all(copied_predt == inplace_predt)
# Don't do this on Windows, see issue #5793
if sys.platform.startswith("win"):
pytest.skip(
'Multi-threaded in-place prediction with cuPy is not working on Windows')
for i in range(10):
run_threaded_predict(X, rows, predict_dense)
base_margin = cp_rng.randn(rows)
self.run_inplace_base_margin(booster, dtrain, X, base_margin)
# Create a wide dataset
X = cp_rng.randn(100, 10000)
y = cp_rng.randn(100)
missing_idx = [i for i in range(0, X.shape[1], 16)]
X[:, missing_idx] = missing
reg = xgb.XGBRegressor(tree_method="gpu_hist", n_estimators=8, missing=missing)
reg.fit(X, y)
gpu_predt = reg.predict(X)
reg.set_params(predictor="cpu_predictor")
cpu_predt = reg.predict(X)
np.testing.assert_allclose(gpu_predt, cpu_predt, atol=1e-6)
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.skipif(**tm.no_cudf())
def test_inplace_predict_cudf(self):
import cupy as cp
import cudf
import pandas as pd
rows = 1000
cols = 10
rng = np.random.RandomState(1994)
cp.cuda.runtime.setDevice(0)
X = rng.randn(rows, cols)
X = pd.DataFrame(X)
y = rng.randn(rows)
X = cudf.from_pandas(X)
dtrain = xgb.DMatrix(X, y)
booster = xgb.train({'tree_method': 'gpu_hist'},
dtrain, num_boost_round=10)
test = xgb.DMatrix(X)
predt_from_array = booster.inplace_predict(X)
predt_from_dmatrix = booster.predict(test)
cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)
def predict_df(x):
# column major array
inplace_predt = booster.inplace_predict(x.values)
d = xgb.DMatrix(x)
copied_predt = cp.array(booster.predict(d))
assert cp.all(copied_predt == inplace_predt)
inplace_predt = booster.inplace_predict(x)
return cp.all(copied_predt == inplace_predt)
for i in range(10):
run_threaded_predict(X, rows, predict_df)
base_margin = cudf.Series(rng.randn(rows))
self.run_inplace_base_margin(booster, dtrain, X, base_margin)
@given(strategies.integers(1, 10),
tm.dataset_strategy, shap_parameter_strategy)
@settings(deadline=None)
def test_shap(self, num_rounds, dataset, param):
param.update({"predictor": "gpu_predictor", "gpu_id": 0})
param = dataset.set_params(param)
dmat = dataset.get_dmat()
bst = xgb.train(param, dmat, num_rounds)
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
shap = bst.predict(test_dmat, pred_contribs=True)
margin = bst.predict(test_dmat, output_margin=True)
assume(len(dataset.y) > 0)
assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-3, 1e-3)
@given(strategies.integers(1, 10),
tm.dataset_strategy, shap_parameter_strategy)
@settings(deadline=None, max_examples=20)
def test_shap_interactions(self, num_rounds, dataset, param):
param.update({"predictor": "gpu_predictor", "gpu_id": 0})
param = dataset.set_params(param)
dmat = dataset.get_dmat()
bst = xgb.train(param, dmat, num_rounds)
test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin)
shap = bst.predict(test_dmat, pred_interactions=True)
margin = bst.predict(test_dmat, output_margin=True)
assume(len(dataset.y) > 0)
assert np.allclose(np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)),
margin,
1e-3, 1e-3)
def test_shap_categorical(self):
X, y = tm.make_categorical(100, 20, 7, False)
Xy = xgb.DMatrix(X, y, enable_categorical=True)
booster = xgb.train({"tree_method": "gpu_hist"}, Xy, num_boost_round=10)
booster.set_param({"predictor": "gpu_predictor"})
shap = booster.predict(Xy, pred_contribs=True)
margin = booster.predict(Xy, output_margin=True)
np.testing.assert_allclose(
np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3
)
booster.set_param({"predictor": "cpu_predictor"})
shap = booster.predict(Xy, pred_contribs=True)
margin = booster.predict(Xy, output_margin=True)
np.testing.assert_allclose(
np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3
)
def test_predict_leaf_basic(self):
gpu_leaf = run_predict_leaf('gpu_predictor')
cpu_leaf = run_predict_leaf('cpu_predictor')
np.testing.assert_equal(gpu_leaf, cpu_leaf)
def run_predict_leaf_booster(self, param, num_rounds, dataset):
param = dataset.set_params(param)
m = dataset.get_dmat()
booster = xgb.train(param, dtrain=dataset.get_dmat(), num_boost_round=num_rounds)
booster.set_param({'predictor': 'cpu_predictor'})
cpu_leaf = booster.predict(m, pred_leaf=True)
booster.set_param({'predictor': 'gpu_predictor'})
gpu_leaf = booster.predict(m, pred_leaf=True)
np.testing.assert_equal(cpu_leaf, gpu_leaf)
@given(predict_parameter_strategy, tm.dataset_strategy)
@settings(deadline=None)
def test_predict_leaf_gbtree(self, param, dataset):
param['booster'] = 'gbtree'
param['tree_method'] = 'gpu_hist'
self.run_predict_leaf_booster(param, 10, dataset)
@given(predict_parameter_strategy, tm.dataset_strategy)
@settings(deadline=None)
def test_predict_leaf_dart(self, param, dataset):
param['booster'] = 'dart'
param['tree_method'] = 'gpu_hist'
self.run_predict_leaf_booster(param, 10, dataset)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.skipif(**tm.no_pandas())
@given(df=data_frames([column('x0', elements=strategies.integers(min_value=0, max_value=3)),
column('x1', elements=strategies.integers(min_value=0, max_value=5))],
index=range_indexes(min_size=20, max_size=50)))
@settings(deadline=None)
def test_predict_categorical_split(self, df):
from sklearn.metrics import mean_squared_error
df = df.astype('category')
x0, x1 = df['x0'].to_numpy(), df['x1'].to_numpy()
y = (x0 * 10 - 20) + (x1 - 2)
dtrain = xgb.DMatrix(df, label=y, enable_categorical=True)
params = {
'tree_method': 'gpu_hist', 'predictor': 'gpu_predictor',
'max_depth': 3, 'learning_rate': 1.0, 'base_score': 0.0, 'eval_metric': 'rmse'
}
eval_history = {}
bst = xgb.train(params, dtrain, num_boost_round=5, evals=[(dtrain, 'train')],
verbose_eval=False, evals_result=eval_history)
pred = bst.predict(dtrain)
rmse = mean_squared_error(y_true=y, y_pred=pred, squared=False)
np.testing.assert_almost_equal(rmse, eval_history['train']['rmse'][-1], decimal=5)
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.parametrize("n_classes", [2, 3])
def test_predict_dart(self, n_classes):
from sklearn.datasets import make_classification
import cupy as cp
n_samples = 1000
X_, y_ = make_classification(
n_samples=n_samples, n_informative=5, n_classes=n_classes
)
X, y = cp.array(X_), cp.array(y_)
Xy = xgb.DMatrix(X, y)
if n_classes == 2:
params = {
"tree_method": "gpu_hist",
"booster": "dart",
"rate_drop": 0.5,
"objective": "binary:logistic"
}
else:
params = {
"tree_method": "gpu_hist",
"booster": "dart",
"rate_drop": 0.5,
"objective": "multi:softprob",
"num_class": n_classes
}
booster = xgb.train(params, Xy, num_boost_round=32)
# predictor=auto
inplace = booster.inplace_predict(X)
copied = booster.predict(Xy)
cpu_inplace = booster.inplace_predict(X_)
booster.set_param({"predictor": "cpu_predictor"})
cpu_copied = booster.predict(Xy)
copied = cp.array(copied)
cp.testing.assert_allclose(cpu_inplace, copied, atol=1e-6)
cp.testing.assert_allclose(cpu_copied, copied, atol=1e-6)
cp.testing.assert_allclose(inplace, copied, atol=1e-6)
booster.set_param({"predictor": "gpu_predictor"})
inplace = booster.inplace_predict(X)
copied = booster.predict(Xy)
copied = cp.array(copied)
cp.testing.assert_allclose(inplace, copied, atol=1e-6)
@pytest.mark.skipif(**tm.no_cupy())
def test_dtypes(self):
import cupy as cp
rows = 1000
cols = 10
rng = cp.random.RandomState(1994)
orig = rng.randint(low=0, high=127, size=rows * cols).reshape(
rows, cols
)
y = rng.randint(low=0, high=127, size=rows)
dtrain = xgb.DMatrix(orig, label=y)
booster = xgb.train({"tree_method": "gpu_hist"}, dtrain)
predt_orig = booster.inplace_predict(orig)
# all primitive types in numpy
for dtype in [
cp.signedinteger,
cp.byte,
cp.short,
cp.intc,
cp.int_,
cp.longlong,
cp.unsignedinteger,
cp.ubyte,
cp.ushort,
cp.uintc,
cp.uint,
cp.ulonglong,
cp.floating,
cp.half,
cp.single,
cp.double,
]:
X = cp.array(orig, dtype=dtype)
predt = booster.inplace_predict(X)
cp.testing.assert_allclose(predt, predt_orig)
# boolean
orig = cp.random.binomial(1, 0.5, size=rows * cols).reshape(
rows, cols
)
predt_orig = booster.inplace_predict(orig)
for dtype in [cp.bool8, cp.bool_]:
X = cp.array(orig, dtype=dtype)
predt = booster.inplace_predict(X)
cp.testing.assert_allclose(predt, predt_orig)
# unsupported types
for dtype in [
cp.complex64,
cp.complex128,
]:
X = cp.array(orig, dtype=dtype)
with pytest.raises(ValueError):
booster.inplace_predict(X)
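# Illustrative sketch (hypothetical helper, not used by the tests above): the
# SHAP assertions in test_shap and test_shap_categorical rely on additivity,
# i.e. the per-feature contributions plus the bias column sum to the raw
# margin for every row.
def _check_shap_additivity(booster, dmatrix, atol=1e-3):
    contribs = booster.predict(dmatrix, pred_contribs=True)
    margin = booster.predict(dmatrix, output_margin=True)
    np.testing.assert_allclose(contribs.sum(axis=-1), margin, atol=atol)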
|
apache-2.0
|
mne-tools/mne-tools.github.io
|
0.22/_downloads/520da5b3196d8a35a9e43f3be08c2b5f/plot_eeglab_head_sphere.py
|
5
|
4832
|
"""
.. _ex-topomap-eeglab-style:
========================================
How to plot topomaps the way EEGLAB does
========================================
If you have previous EEGLAB experience you may have noticed that topomaps
(topoplots) generated using MNE-Python look a little different from those
created in EEGLAB. If you prefer the EEGLAB style, this example will show you
how to calculate the head sphere origin and radius needed to obtain an
EEGLAB-like channel layout in MNE.
"""
# Authors: Mikołaj Magnuski <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
import mne
print(__doc__)
###############################################################################
# Create fake data
# ----------------
#
# First we will create a simple evoked object with a single timepoint using
# biosemi 10-20 channel layout.
biosemi_montage = mne.channels.make_standard_montage('biosemi64')
n_channels = len(biosemi_montage.ch_names)
fake_info = mne.create_info(ch_names=biosemi_montage.ch_names, sfreq=250.,
ch_types='eeg')
rng = np.random.RandomState(0)
data = rng.normal(size=(n_channels, 1)) * 1e-6
fake_evoked = mne.EvokedArray(data, fake_info)
fake_evoked.set_montage(biosemi_montage)
###############################################################################
# Calculate sphere origin and radius
# ----------------------------------
#
# EEGLAB plots head outline at the level where the head circumference is
# measured in the 10-20 system (a line going through Fpz, T8/T4, Oz and T7/T3
# channels).
# MNE-Python places the head outline lower on the z dimension, at the level of
# the anatomical landmarks :term:`LPA, RPA, and NAS <fiducial point>`.
# Therefore to use the EEGLAB layout we
# have to move the origin of the reference sphere (a sphere that is used as a
# reference when projecting channel locations to a 2d plane) a few centimeters
# up.
#
# Instead of approximating this position by eye, as we did in :ref:`the sensor
# locations tutorial <tut-sensor-locations>`, here we will calculate it using
# the position of Fpz, T8, Oz and T7 channels available in our montage.
# first we obtain the 3d positions of selected channels
check_ch = ['Oz', 'Fpz', 'T7', 'T8']
ch_idx = [fake_evoked.ch_names.index(ch) for ch in check_ch]
pos = np.stack([fake_evoked.info['chs'][idx]['loc'][:3] for idx in ch_idx])
# now we calculate the radius from T7 and T8 x position
# (we could use Oz and Fpz y positions as well)
radius = np.abs(pos[[2, 3], 0]).mean()
# then we obtain the x, y, z sphere center this way:
# x: x position of the Oz channel (should be very close to 0)
# y: y position of the T8 channel (should be very close to 0 too)
# z: average z position of Oz, Fpz, T7 and T8 (their z position should be the
#    same, so we could also use just one of these channels), it should be
# positive and somewhere around `0.03` (3 cm)
x = pos[0, 0]
y = pos[-1, 1]
z = pos[:, -1].mean()
# let's print the values we got:
print([f'{v:0.5f}' for v in [x, y, z, radius]])
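# a quick sanity check on the values above (the ranges are assumptions for the
# standard biosemi64 montage): the centre should lie above the origin and the
# radius should be roughly that of a human head
assert z > 0 and 0.05 < radius < 0.15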
###############################################################################
# Compare MNE and EEGLAB channel layout
# -------------------------------------
#
# We already have the required x, y, z sphere center and its radius. We can
# use these values by passing them to the ``sphere`` argument of many
# topo-plotting functions, i.e. ``sphere=(x, y, z, radius)``.
# create a two-panel figure with some space for the titles at the top
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), gridspec_kw=dict(top=0.9),
sharex=True, sharey=True)
# we plot the channel positions with default sphere - the mne way
fake_evoked.plot_sensors(axes=ax[0], show=False)
# in the second panel we plot the positions using the EEGLAB reference sphere
fake_evoked.plot_sensors(sphere=(x, y, z, radius), axes=ax[1], show=False)
# add titles
ax[0].set_title('MNE channel projection', fontweight='bold')
ax[1].set_title('EEGLAB channel projection', fontweight='bold')
###############################################################################
# Topomaps (topoplots)
# --------------------
#
# As the last step we do the same, but plotting the topomaps. These will not
# be particularly interesting as they will show random data but hopefully you
# will see the difference.
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), gridspec_kw=dict(top=0.9),
sharex=True, sharey=True)
mne.viz.plot_topomap(fake_evoked.data[:, 0], fake_evoked.info, axes=ax[0],
show=False)
mne.viz.plot_topomap(fake_evoked.data[:, 0], fake_evoked.info, axes=ax[1],
show=False, sphere=(x, y, z, radius))
# add titles
ax[0].set_title('MNE', fontweight='bold')
ax[1].set_title('EEGLAB', fontweight='bold')
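# The figures above are rendered automatically by sphinx-gallery; when running
# this example as a plain script you may need an explicit call to display them:
plt.show()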
|
bsd-3-clause
|
burakbayramli/dersblog
|
stat/stat_137_collab/movielens_prep.py
|
2
|
1445
|
'''
Creates a new matrix from the MovieLens data with users as rows and items
as columns, with the necessary categorical fields one-hot encoded
'''
from sklearn.feature_extraction import DictVectorizer
import pandas as pd
import os
def one_hot_dataframe(data, cols, replace=False):
    '''One-hot encode the given columns with a DictVectorizer and return the
    (optionally replaced) frame, the encoded columns and the fitted vectorizer.'''
    vec = DictVectorizer()
    # 'orient' replaces the long-removed 'outtype' keyword of DataFrame.to_dict
    vecData = pd.DataFrame(vec.fit_transform(
        data[cols].to_dict(orient='records')).toarray())
    # older scikit-learn versions use vec.get_feature_names() instead
    vecData.columns = vec.get_feature_names_out()
    vecData.index = data.index
    if replace:
        data = data.drop(cols, axis=1)
        data = data.join(vecData)
    return (data, vecData, vec)
rnames = ['user_id', 'movie_id', 'rating', 'timestamp']
# the '::' separator is longer than one character, so the python parser engine is needed
ratings = pd.read_table('ratings.dat', sep='::', header=None, names=rnames,
                        engine='python')
# 'index'/'columns' replace the long-removed 'rows'/'cols' keywords of pivot_table
df2 = ratings.pivot_table('rating', index='user_id', columns='movie_id')
df2.to_csv("/tmp/out1.csv", sep=';')
unames = ['user_id', 'gender', 'age', 'occupation', 'zip']
users = pd.read_table('users.dat', sep='::', header=None, names=unames,
                      engine='python')
users = users.drop('zip', axis=1)
users['occupation'] = users['occupation'].map(str)
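# For comparison only (an illustrative sketch, not used below): a similar one-hot
# encoding can be obtained with pandas alone; get_dummies drops the listed columns
# and appends one indicator column per category.
users_dummies = pd.get_dummies(users, columns=['gender', 'occupation'])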
df2, _, _ = one_hot_dataframe(users, ['gender', 'occupation'], replace=True)
df2.to_csv("/tmp/out2.csv", sep=';', index=None)
df1 = pd.read_csv("/tmp/out1.csv", sep=';')
df2 = pd.read_csv("/tmp/out2.csv", sep=';')
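# with no 'on' argument, pd.merge joins on the columns shared by both frames,
# which here is only 'user_id'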
df3 = pd.merge(df1, df2)
df3.to_csv("%s/Downloads/movielens.csv" % os.environ['HOME'], sep=';', index=None)
|
gpl-3.0
|