repo_name | path | copies | size | content | license
---|---|---|---|---|---|
hlin117/scikit-learn | sklearn/linear_model/omp.py | 7 | 31863 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
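# --- Illustrative usage (editorially added sketch, not part of the public API) ---
# A minimal example of calling ``orthogonal_mp`` on synthetic data.  The random
# dictionary, the chosen support {5, 17, 42} and the helper name are all
# hypothetical; the columns are normalized to unit norm as the docstring
# requires.  The function exists only for illustration and is never called.
def _example_orthogonal_mp():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    X /= np.sqrt(np.sum(X ** 2, axis=0))       # unit-norm columns
    true_coef = np.zeros(100)
    true_coef[[5, 17, 42]] = [1.5, -2.0, 3.0]  # 3-sparse ground truth
    y = np.dot(X, true_coef)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=3)
    return np.flatnonzero(coef)                # indices of the recovered atoms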
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the columns of y (one per target). Required if
        tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
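# --- Illustrative usage (editorially added sketch, not part of the public API) ---
# A hedged sketch showing that the Gram variant needs only X.T * X and X.T * y,
# which is convenient when X itself is too large to keep in memory.  The helper
# and its arguments are hypothetical and it is never called here.
def _example_orthogonal_mp_gram(X, y, n_nonzero_coefs=3):
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    return orthogonal_mp_gram(G, Xy, n_nonzero_coefs=n_nonzero_coefs)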
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
    Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_offset, y_offset, X_scale)
return self
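# --- Illustrative usage (editorially added sketch, not part of the public API) ---
# A minimal example of the estimator interface on synthetic data; the shapes,
# the noise level and the helper name are arbitrary choices for illustration,
# and the function is never called by the module.
def _example_omp_estimator():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 20)
    y = np.dot(X[:, :4], [1., 2., -1., 0.5]) + 0.01 * rng.randn(40)
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=4).fit(X, y)
    return model.coef_, model.n_iter_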
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues : array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
        Number of active features across every target for the model refit with
        the best hyperparameters obtained by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, ensure_min_features=2,
estimator=self)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv.split(X))
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
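# --- Illustrative usage (editorially added sketch, not part of the public API) ---
# A short example of the cross-validated estimator; ``cv=5``, the data shapes
# and the noise level are arbitrary, and the helper is never invoked.
def _example_omp_cv_estimator():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 30)
    y = np.dot(X[:, :5], rng.randn(5)) + 0.1 * rng.randn(100)
    model = OrthogonalMatchingPursuitCV(cv=5).fit(X, y)
    return model.n_nonzero_coefs_, model.coef_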
| bsd-3-clause |
carlsonp/kaggle-TrulyNative | btb_native_basic3.py | 1 | 13933 | """
Beating the Benchmark
Truly Native?
__author__ : David Shinn, modified by firefly2442
"""
from __future__ import print_function
import glob, multiprocessing, os, re, sys, time, pickle, random
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
#https://pypi.python.org/pypi/etaprogress/
from etaprogress.progress import ProgressBar
#https://pypi.python.org/pypi/tld/
from tld import get_tld
class ResultFileEntry(object):
#https://utcc.utoronto.ca/~cks/space/blog/python/WhatSlotsAreGoodFor
#http://tech.oyster.com/save-ram-with-python-slots/
#https://stackoverflow.com/questions/1336791/dictionary-vs-object-which-is-more-efficient-and-why
#https://stackoverflow.com/questions/472000/python-slots
    # We use __slots__ here to save memory; a dynamic instance dictionary is not needed.
__slots__ = ["file", "sponsored", "lines", "spaces", "tabs", "braces", "brackets", "parentheses", "words", "length", "cleaned_lines",
"cleaned_spaces", "cleaned_tabs", "cleaned_words", "cleaned_length", "http_total_links", "other_total_links",
"domain_com", "domain_edu", "domain_gov", "domain_org", "domain_net", "http_pruned_links", "sponsored_links",
"nonsponsored_links", "unknown_links", "title_length", "javascript_scripts_count", "javascript_length",
"img_count", "meta_length", "meta_keywords", "meta_description_length", "meta_redirect", "iframe_count", "word_ad", "word_sponsored",
"word_advertisement", "word_money", "word_cash", "word_prize", "word_buy", "word_sell", "word_cookie", "word_redirect",
"word_free", "word_affiliate", "word_banner", "word_scentsy", "word_adword", "word_marketing", "word_track", "robot",
"XMLHttpRequest_count", "js_redirect", "ajax", "document_write", "url_behance", "url_incapsula", "url_godaddy_park",
"url_amazon"]
def __init__(self, filename):
self.file = filename
self.sponsored = None #make sure this is none because there is a check for this later during training
self.lines = 0
self.spaces = 0
self.tabs = 0
self.braces = 0
self.brackets = 0
self.parentheses = 0
self.words = 0
self.length = 0
self.cleaned_lines = 0
self.cleaned_spaces = 0
self.cleaned_tabs = 0
self.cleaned_words = 0
self.cleaned_length = 0
self.http_total_links = 0
self.other_total_links = 0
self.domain_com = 0
self.domain_edu = 0
self.domain_gov = 0
self.domain_org = 0
self.domain_net = 0
self.http_pruned_links = 0
self.sponsored_links = []
self.nonsponsored_links = []
self.unknown_links = []
self.title_length = 0
self.javascript_scripts_count = 0
self.javascript_length = 0
self.img_count = 0
self.meta_length = 0
self.meta_keywords = 0
self.meta_description_length = 0
self.meta_redirect = 0
self.iframe_count = 0
self.word_ad = 0
self.word_sponsored = 0
self.word_advertisement = 0
self.word_money = 0
self.word_cash = 0
self.word_prize = 0
self.word_buy = 0
self.word_sell = 0
self.word_cookie = 0
self.word_redirect = 0
self.word_free = 0
self.word_affiliate = 0
self.word_banner = 0
self.word_scentsy = 0
self.word_adword = 0
self.word_marketing = 0
self.word_track = 0
self.robot = 0 #robots.txt information
self.XMLHttpRequest_count = 0
self.js_redirect = 0
self.ajax = 0
self.document_write = 0
self.url_behance = 0
self.url_incapsula = 0
self.url_godaddy_park = 0
self.url_amazon = 0
#https://docs.python.org/2.7/reference/datamodel.html#special-method-names
def __iter__(self):
        # Yield every slot value in declaration order (the same order as the
        # column_names list below), so list(entry) maps directly onto a
        # DataFrame row.
        for name in self.__slots__:
            yield getattr(self, name)
def create_data(filepath):
urls = set() #this way we don't have duplicate URLs
filename = os.path.basename(filepath)
with open(filepath, 'rb') as infile:
text = infile.read()
values = ResultFileEntry(filename)
if filename in train_keys:
values.sponsored = train_keys[filename]
values.lines = text.count('\n')
values.spaces = text.count(' ')
values.tabs = text.count('\t')
values.braces = text.count('{')
values.brackets = text.count('[')
values.parentheses = text.count('(')
values.words = len(re.split('\s+', text))
values.length = len(text)
values.url_behance = text.count('behance.net')
values.url_incapsula = text.count('incapsula.com')
values.url_godaddy_park = text.count('mcc.godaddy.com/park/')
values.url_amazon = text.count('rcm.amazon.com')
#use lxml parser for faster speed
parsed = BeautifulSoup(text, "lxml")
cleaned = parsed.getText()
values.cleaned_lines = cleaned.count('\n')
values.cleaned_spaces = cleaned.count(' ')
values.cleaned_tabs = cleaned.count('\t')
values.cleaned_words = len(re.split('\s+', cleaned))
values.cleaned_length = len(cleaned)
for anchor in parsed.findAll('a', href=True):
if anchor['href'].startswith("http"):
#count of different generic top level domains (.com, .edu, .gov, etc.)
try:
res = get_tld(anchor['href'], as_object=True, fail_silently=True)
if res:
if res.suffix == 'com':
values.domain_com += 1
elif res.suffix == 'edu':
values.domain_edu += 1
elif res.suffix == 'gov':
values.domain_gov += 1
elif res.suffix == 'org':
values.domain_org += 1
elif res.suffix == 'net':
values.domain_net += 1
values.http_total_links += 1
#very important to use str(res) here, otherwise it adds the object res and not the string
#resulting in a huge number of urls to parse (and store in memory!)
urls.add(str(res))
except ValueError:
print("IPv6 URL?")
else:
values.other_total_links += 1
values.http_pruned_links = len(urls)
if filename in train_keys:
if train_keys[filename] == 1:
values.sponsored_links = list(urls)
elif train_keys[filename] == 0:
values.nonsponsored_links = list(urls)
else:
values.unknown_links = list(urls)
if parsed.title and parsed.title.string:
values.title_length = len(parsed.title.string)
javascript = parsed.findAll('script')
values.javascript_scripts_count = len(javascript)
for j in javascript:
values.javascript_length += len(j)
values.img_count = len(parsed.findAll('img'))
for meta in parsed.findAll('meta'):
if meta.has_attr('content'):
values.meta_length += len(meta['content'])
for meta in parsed.findAll('meta', attrs={"name":"keywords"}):
if meta.has_attr('content'):
values.meta_keywords += len(meta['content'].split(","))
for meta in parsed.findAll('meta', attrs={"name":"description"}):
if meta.has_attr('content'):
values.meta_description_length += len(meta['content'])
for meta in parsed.findAll('meta', attrs={"name":"robots"}):
if meta.has_attr('content'):
values.robot = 1
#http://webmaster.iu.edu/tools-and-guides/maintenance/redirect-meta-refresh.phtml
for meta in parsed.findAll('meta', attrs={"http-equiv":"refresh"}):
if meta.has_attr('content'):
if "URL" in meta['content']:
values.meta_redirect = 1
for iframe in parsed.findAll('iframe'):
if iframe.has_attr('src'):
values.iframe_count += 1
values.word_ad = len(re.findall("\sad[s]*", cleaned.lower())) #ad or ads
values.word_sponsored = len(re.findall("\ssponsor[ed]*", cleaned.lower())) #sponsor or sponsored
values.word_advertisement = len(re.findall("\sadvertise[ment]*", cleaned.lower())) #advertise or advertisement
values.word_money = len(re.findall("\smoney", cleaned.lower())) #money
values.word_cash = len(re.findall("\scash", cleaned.lower())) #cash
values.word_prize = len(re.findall("\sprize", cleaned.lower())) #prize
values.word_buy = len(re.findall("\sbuy", cleaned.lower())) #buy
values.word_sell = len(re.findall("\ssell", cleaned.lower())) #sell
values.word_cookie = len(re.findall("\scookie[s]*", text.lower())) #cookie or cookies
values.word_redirect = len(re.findall("\sredirect[ed]*", text.lower())) #redirect or redirected
values.word_free = len(re.findall("\sfree", text.lower())) #free
values.word_affiliate = len(re.findall("\saffiliate[d]*", text.lower())) #affiliate or affiliated
values.word_banner = len(re.findall("\sbanner[s]*", text.lower())) #banner or banners
values.word_scentsy = len(re.findall("\sscentsy", text.lower())) #scentsy
values.word_adword = len(re.findall("\sadword[s]*", text.lower())) #adword or adwords
values.word_marketing = len(re.findall("\smarketing", text.lower())) #marketing
values.word_track = len(re.findall("\strack[ing]*", text.lower())) #track or tracking
values.XMLHttpRequest_count = len(re.findall("xmlhttprequest", text.lower())) #XMLHttpRequest
values.js_redirect = len(re.findall("window\.location|window\.location\.href|window\.location\.assign|window\.location\.replace|window\.location\.host|window\.top\.location", text.lower())) #different possibilites for javascript redirects
values.ajax = len(re.findall("\$\.ajax\(\{", text.lower())) #ajax call: $.ajax({
values.document_write = len(re.findall("document.write", text.lower())) #document.write
return values
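# --- Illustrative check (editorially added, not part of the original script) ---
# A tiny, hypothetical sanity check of the word-count regexes used above: the
# leading "\s" means "ad"/"ads" is only counted when preceded by whitespace, so
# a word such as "road" is ignored.  The helper is never called.
def _example_word_counts():
    sample = "This page has ads and one sponsored banner on the road"
    ad_hits = len(re.findall("\sad[s]*", sample.lower()))             # -> 1
    sponsor_hits = len(re.findall("\ssponsor[ed]*", sample.lower()))  # -> 1
    return ad_hits, sponsor_hits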
print('--- Read training labels')
train = pd.read_csv('./data/train_v2.csv')
train_keys = dict([a[1] for a in train.iterrows()])
del train #free up this memory?
filepaths = glob.glob('data/*/*.txt')
#random.shuffle(filepaths)
#filepaths = filepaths[0:1000]
num_tasks = len(filepaths)
bar = ProgressBar(num_tasks, max_width=40)
p = multiprocessing.Pool()
#imap_unordered means we don't care about the order of the returned results
results = p.imap_unordered(create_data, filepaths) #chunksize default = 1
print("--- Started processing")
while (True):
bar.numerator = results._index
print(bar, end='\r')
sys.stdout.flush()
time.sleep(1)
if (results._index == num_tasks): break
p.close()
p.join()
print()
column_names = ["file", "sponsored", "lines", "spaces", "tabs", "braces", "brackets", "parentheses", "words", "length", "cleaned_lines",
"cleaned_spaces", "cleaned_tabs", "cleaned_words", "cleaned_length", "http_total_links", "other_total_links",
"domain_com", "domain_edu", "domain_gov", "domain_org", "domain_net", "http_pruned_links", "sponsored_links",
"nonsponsored_links", "unknown_links", "title_length", "javascript_scripts_count", "javascript_length",
"img_count", "meta_length", "meta_keywords", "meta_description_length", "meta_redirect", "iframe_count", "word_ad", "word_sponsored",
"word_advertisement", "word_money", "word_cash", "word_prize", "word_buy", "word_sell", "word_cookie", "word_redirect",
"word_free", "word_affiliate", "word_banner", "word_scentsy", "word_adword", "word_marketing", "word_track", "robot",
"XMLHttpRequest_count", "js_redirect", "ajax", "document_write", "url_behance", "url_incapsula", "url_godaddy_park",
"url_amazon"]
df_full = pd.DataFrame(list(results), columns=column_names)
with pd.option_context('display.max_rows', 20, 'display.max_columns', len(column_names)):
print(df_full)
#create correlation table
cor = df_full.corr(method='pearson')
cor = np.round(cor, decimals=2) #round to 2 decimal places
cor.to_csv('correlations.csv')
#print(df_full.columns.values)
print("--- Calculating link ratios")
#calculate counts for training data
sponsored_counts = {}
nonsponsored_counts = {}
for index, row in df_full.iterrows():
for url in row['sponsored_links']:
if url in sponsored_counts:
sponsored_counts[url] += 1
else:
sponsored_counts[url] = 1
for url in row['nonsponsored_links']:
if url in nonsponsored_counts:
nonsponsored_counts[url] += 1
else:
nonsponsored_counts[url] = 1
df_full['sponsored_ratio'] = None #create empty column in dataframe
#calculate ratio of sponsored to nonsponsored links
for index, row in df_full.iterrows():
websites = row['sponsored_links'] + row['nonsponsored_links'] + row['unknown_links']
l = []
for website in websites:
sponsoredCount = float(0)
notsponsoredCount = float(0)
if website in sponsored_counts:
sponsoredCount = float(sponsored_counts.get(website))
if website in nonsponsored_counts:
notsponsoredCount = float(nonsponsored_counts.get(website))
ratio = float(0)
if sponsoredCount != 0:
ratio = sponsoredCount / (sponsoredCount + notsponsoredCount)
l.append(ratio)
if len(l) != 0:
df_full.set_value(index, 'sponsored_ratio', sum(l)/len(l))
else:
df_full.set_value(index, 'sponsored_ratio', 0)
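# Worked example of the ratio above (illustrative numbers, not taken from the
# data): if "example.com" appears in 3 sponsored and 1 non-sponsored training
# pages, its ratio is 3 / (3 + 1) = 0.75; a page's sponsored_ratio is then the
# mean of the ratios of all distinct domains it links to.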
#remove old data that we don't need for the prediction
df_full = df_full.drop(['sponsored_links', 'nonsponsored_links', 'unknown_links'], 1)
pickle.dump(df_full, open("df_full.p", "wb"))
| gpl-3.0 |
Chuban/moose | python/peacock/tests/postprocessor_tab/test_PostprocessorPluginManager.py | 5 | 5043 | #!/usr/bin/env python
import sys
import os
import unittest
import subprocess
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.PostprocessorDataWidget import PostprocessorDataWidget
from peacock.PostprocessorViewer.PostprocessorPluginManager import main
from peacock.utils import Testing
import mooseutils
class TestPostprocessorPluginManager(Testing.PeacockImageTestCase):
"""
    Test class for the PostprocessorPluginManager widget.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
@classmethod
def setUpClass(cls):
"""
Clean up from previous testing.
"""
super(TestPostprocessorPluginManager, cls).setUpClass()
names = ['{}_test_script.py'.format(cls.__name__), '{}_test_output.pdf'.format(cls.__name__), '{}_test_output.png'.format(cls.__name__)]
for name in names:
if os.path.exists(name):
os.remove(name)
def setUp(self):
"""
Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
"""
data = [PostprocessorDataWidget(mooseutils.PostprocessorReader('../input/white_elephant_jan_2016.csv'))]
self._widget, self._window = main()
self._widget.FigurePlugin.setFixedSize(QtCore.QSize(500, 500))
self._widget.call('onSetData', data)
def plot(self):
"""
Create plot with all widgets modified.
"""
# Plot some data
toggle = self._widget.PostprocessorSelectPlugin._groups[0]._toggles['air_temp_set_1']
toggle.CheckBox.setCheckState(QtCore.Qt.Checked)
toggle.PlotAxis.setCurrentIndex(1)
toggle.LineStyle.setCurrentIndex(1)
toggle.LineWidth.setValue(5)
toggle.clicked.emit()
# Add title and legend
ax = self._widget.AxesSettingsPlugin
ax.Title.setText('Snow Data')
ax.Title.editingFinished.emit()
ax.Legend2.setCheckState(QtCore.Qt.Checked)
ax.Legend2.clicked.emit(True)
ax.Legend2Location.setCurrentIndex(4)
ax.Legend2Location.currentIndexChanged.emit(4)
ax.onAxesModified()
# Set limits and axis titles (y2-only)
ax = self._widget.AxisTabsPlugin.Y2AxisTab
ax.Label.setText('Air Temperature [C]')
ax.Label.editingFinished.emit()
ax.RangeMinimum.setText('0')
ax.RangeMinimum.editingFinished.emit()
def testWidgets(self):
"""
Test that the widgets contained in PostprocessorPlotWidget are working.
"""
self.plot()
self.assertImage('testWidgets.png')
def testOutput(self):
"""
Test that the python output is working.
"""
self.plot()
# Write the python script
output = self._widget.OutputPlugin
name = '{}_test_script.py'.format(self.__class__.__name__)
output.write.emit(name)
self.assertTrue(os.path.exists(name))
# Compare with gold
with open(name, 'r') as fid:
script = fid.read()
with open(os.path.join('gold', name), 'r') as fid:
gold_script = fid.read()
self.assertEqual(script.strip('\n'), gold_script.strip('\n'))
# Remove the show from the script and make it output a png
script = script.replace('plt.show()', '')
script = script.replace('output.pdf', 'output.png')
with open(name, 'w') as fid:
fid.write(script)
subprocess.call(['python', name], stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
self.assertTrue(os.path.exists('output.png'))
differ = mooseutils.ImageDiffer(os.path.join('gold', 'output.png'), 'output.png', allowed=0.99)
print differ.message()
self.assertFalse(differ.fail(), "{} does not match the gold file.".format(name))
# Test pdf output
name = '{}_test_output.pdf'.format(self.__class__.__name__)
output.write.emit(name)
self.assertTrue(os.path.exists(name))
# Test png output
name = '{}_test_output.png'.format(self.__class__.__name__)
output.write.emit(name)
self.assertTrue(os.path.exists(name))
goldname = os.path.join('gold', name)
differ = mooseutils.ImageDiffer(goldname, name, allowed=0.99)
self.assertFalse(differ.fail(), "{} does not match the gold file.".format(name))
def testLiveScript(self):
"""
Tests that live script widget.
"""
self._widget.OutputPlugin.LiveScriptButton.clicked.emit()
self.assertTrue(self._widget.OutputPlugin.LiveScript.isVisible())
self.assertIn("plt.figure", self._widget.OutputPlugin.LiveScript.toPlainText())
self.plot()
self._widget.OutputPlugin.onAxesModified()
self.assertIn("markersize=1", self._widget.OutputPlugin.LiveScript.toPlainText())
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
glneo/gnuradio-davisaf | gnuradio-core/src/examples/pfb/synth_to_chan.py | 17 | 3587 | #!/usr/bin/env python
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys
try:
import scipy
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
fmtx = list()
for fi in freqs:
s = gr.sig_source_f(fs, gr.GR_SIN_WAVE, fi, 1)
fm = blks2.nbfm_tx (fs, 4*fs, max_dev=10000, tau=75e-6)
sigs.append(s)
fmtx.append(fm)
syntaps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Synthesis Num. Taps = %d (taps per filter = %d)" % (len(syntaps),
len(syntaps)/nchans)
chtaps = gr.firdes.low_pass_2(len(freqs), fs, fs/float(nchans)/2, 100, 100)
print "Channelizer Num. Taps = %d (taps per filter = %d)" % (len(chtaps),
len(chtaps)/nchans)
filtbank = gr.pfb_synthesizer_ccf(nchans, syntaps)
channelizer = blks2.pfb_channelizer_ccf(nchans, chtaps)
noise_level = 0.01
head = gr.head(gr.sizeof_gr_complex, N)
noise = gr.noise_source_c(gr.GR_GAUSSIAN, noise_level)
addnoise = gr.add_cc()
snk_synth = gr.vector_sink_c()
tb = gr.top_block()
tb.connect(noise, (addnoise,0))
tb.connect(filtbank, head, (addnoise, 1))
tb.connect(addnoise, channelizer)
tb.connect(addnoise, snk_synth)
snk = list()
for i,si in enumerate(sigs):
tb.connect(si, fmtx[i], (filtbank, i))
for i in xrange(nchans):
snk.append(gr.vector_sink_c())
tb.connect((channelizer, i), snk[i])
tb.run()
if 1:
channel = 1
data = snk[channel].data()[1000:]
f1 = pylab.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(data[10000:10200] )
s1.set_title(("Output Signal from Channel %d" % channel))
fftlen = 2048
winfunc = scipy.blackman
#winfunc = scipy.hamming
f2 = pylab.figure(2)
s2 = f2.add_subplot(1,1,1)
s2.psd(data, NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s2.set_title(("Output PSD from Channel %d" % channel))
f3 = pylab.figure(3)
s3 = f3.add_subplot(1,1,1)
s3.psd(snk_synth.data()[1000:], NFFT=fftlen,
Fs = nchans*fs,
noverlap=fftlen/4,
window = lambda d: d*winfunc(fftlen))
s3.set_title("Output of Synthesis Filter")
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
agrinh/pypatches | patchworks.py | 1 | 2629 | #!/usr/bin/python
import itertools
import sklearn.decomposition
import scipy.spatial
import matplotlib.colors  # needed by preprocess() for the optional 'hsv' colorspace
import numpy
from matcher import PCAMatcher, MeanColorMatcher
from patches import Patches
from utilities import crop, visual_compare
class Patchworks(object):
"""
Produces patchworks.
I.e. reproduces an image from a set of images using the represent method.
"""
def __init__(self, images, patch_shape, scale_factor=1, alternatives=1,
colorspace='rgb'):
if colorspace not in ('rgb', 'hsv'):
raise ValueError('Only supported colorspaces are rgb and hsv')
# store parameters
self.__colorspace = colorspace
self.__patch_shape = patch_shape
real_shape = (patch_shape[0] * scale_factor,
patch_shape[1] * scale_factor)
self.__images = [crop(image, real_shape) for image in images]
# prepare images
preprocessed = itertools.imap(self.preprocess, self.__images)
data = numpy.vstack(preprocessed)
self.match = MeanColorMatcher(data, alternatives)
# # # Helpers
def preprocess(self, patch):
"""
        Perform image processing on the patch before it is flattened.
        """
        if patch.shape != self.__patch_shape:
            cropped = crop(patch, self.__patch_shape)
        else:
            cropped = patch
if self.__colorspace == 'hsv':
cropped = matplotlib.colors.rgb_to_hsv(cropped)
return cropped.flatten().astype(numpy.float)
# # # Main interface
def replace(self, patch):
"""
Replace patch with one from library of images.
"""
point = self.preprocess(patch)
return self.__images[self.match(point)]
def represent(self, image):
"""
Create a patchwork representing the image.
"""
patches = Patches(image, self.__patch_shape)
replacement_patches = itertools.imap(self.replace, patches)
return patches.stack(replacement_patches)
def visualize(self, image):
patches = Patches(image, self.__patch_shape)
extract = lambda patch: self.match.transform(self.preprocess(patch))
patch_data = numpy.vstack(itertools.imap(extract, patches))
patch_data = patch_data[:, :3] # select the three principal components
visual_compare(self.match.data, patch_data)
@property
def component_images(self):
"""
Returns images of the principal components of the library of images.
"""
pca_images = (component.reshape(self.__patch_shape)
for component in self.match.components)
return pca_images
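# --- Illustrative usage (editorially added sketch, not part of the original module) ---
# Assumes ``library`` is a list of RGB numpy arrays and ``target`` is the image
# to reproduce; the 32x32 patch size and ``alternatives=3`` are arbitrary.  The
# helper is hypothetical and never called.
def _example_patchwork(library, target):
    works = Patchworks(library, patch_shape=(32, 32), alternatives=3)
    return works.represent(target)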
| mit |
geomf/omf-fork | omf/models/solarEngineering.py | 1 | 25005 | # Portions Copyrights (C) 2015 Intel Corporation
''' Powerflow results for one Gridlab instance. '''
import sys
import shutil
import datetime
import gc
import os
import math
import json
import networkx as nx
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
import multiprocessing
from os.path import join as pJoin
from os.path import split as pSplit
from jinja2 import Template
import traceback
import __metaModel__
from __metaModel__ import roundSig, getStatus, renderAndShow
# OMF imports
sys.path.append(__metaModel__._omfDir)
from omf import feeder
from omf.solvers import gridlabd
from pyhdfs import HdfsFileNotFoundException
from omf.weather import zipCodeToClimateName
import logging
logger = logging.getLogger(__name__)
template = None
def renderTemplate(template, fs, modelDir="", absolutePaths=False, datastoreNames={}):
''' Render the model template to an HTML string.
By default render a blank one for new input.
If modelDir is valid, render results post-model-run.
If absolutePaths, the HTML can be opened without a server. '''
# Our HTML template for the interface:
with fs.open("models/solarEngineering.html") as tempFile:
template = Template(tempFile.read())
try:
inJson = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
modelPath, modelName = pSplit(modelDir)
deepPath, user = pSplit(modelPath)
inJson["modelName"] = modelName
inJson["user"] = user
allInputData = json.dumps(inJson)
except (IOError, HdfsFileNotFoundException):
allInputData = None
try:
allOutputData = fs.open(pJoin(modelDir, "allOutputData.json")).read()
except (HdfsFileNotFoundException, IOError):
allOutputData = None
if absolutePaths:
# Parent of current folder.
pathPrefix = __metaModel__._omfDir
else:
pathPrefix = ""
try:
inputDict = json.load(fs.open(pJoin(modelDir, "allInputData.json")))
except (IOError, HdfsFileNotFoundException):
pass
return template.render(allInputData=allInputData,
allOutputData=allOutputData, modelStatus=getStatus(modelDir, fs), pathPrefix=pathPrefix,
datastoreNames=datastoreNames)
def run(modelDir, inputDict, fs):
''' Run the model in a separate process. web.py calls this to run the model.
This function will return fast, but results take a while to hit the file system.'''
# Check whether model exist or not
if not fs.exists(modelDir):
fs.create_dir(modelDir)
inputDict["created"] = str(datetime.datetime.now())
# MAYBEFIX: remove this data dump. Check showModel in web.py and
# renderTemplate()
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
# If we are re-running, remove output and old GLD run:
try:
fs.remove(pJoin(modelDir, "allOutputData.json"))
fs.remove(pJoin(modelDir, "gldContainer"))
except:
pass
# Start background process.
backProc = multiprocessing.Process(
target=heavyProcessing, args=(modelDir, inputDict, fs))
backProc.start()
print "SENT TO BACKGROUND", modelDir
fs.save(pJoin(modelDir, "PPID.txt"), str(backProc.pid))
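# run() launches heavyProcessing in a background process and records its PID in
# PPID.txt so callers can track the run; runForeground() below performs the same
# work synchronously and stores the sentinel PID -999 instead.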
def runForeground(modelDir, inputDict, fs):
''' Run the model in the current process. WARNING: LONG RUN TIME. '''
# Check whether model exist or not
logger.info("Running solarEngineering model... modelDir: %s; inputDict: %s", modelDir, inputDict)
if not fs.exists(modelDir):
fs.create_dir(modelDir)
inputDict["created"] = str(datetime.datetime.now())
# MAYBEFIX: remove this data dump. Check showModel in web.py and
# renderTemplate()
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
# If we are re-running, remove output and old GLD run:
try:
fs.remove(pJoin(modelDir, "allOutputData.json"))
except:
pass
try:
fs.remove(pJoin(modelDir, "gldContainer"))
except:
pass
# Start process.
fs.save(pJoin(modelDir, "PPID.txt"), '-999')
heavyProcessing(modelDir, inputDict, fs)
def heavyProcessing(modelDir, inputDict, fs):
''' Run the model in its directory. WARNING: GRIDLAB CAN TAKE HOURS TO COMPLETE. '''
print "STARTING TO RUN", modelDir
beginTime = datetime.datetime.now()
# Get feeder name and data in.
try:
fs.create_dir(pJoin(modelDir, 'gldContainer'))
except:
pass
feederDir, feederName = inputDict["feederName"].split("___")
fs.export_from_fs_to_local(pJoin("data", "Feeder", feederDir, feederName + ".json"),
pJoin(modelDir, "feeder.json"))
inputDict["climateName"], latforpvwatts = zipCodeToClimateName(
inputDict["zipCode"], fs)
fs.export_from_fs_to_local(pJoin("data", "Climate", inputDict["climateName"] + ".tmy2"),
pJoin(modelDir, "gldContainer", "climate.tmy2"))
try:
startTime = datetime.datetime.now()
feederJson = json.load(open(pJoin(modelDir, "feeder.json")))
tree = feederJson["tree"]
# Set up GLM with correct time and recorders:
feeder.attachRecorders(tree, "Regulator", "object", "regulator")
feeder.attachRecorders(tree, "Capacitor", "object", "capacitor")
feeder.attachRecorders(tree, "Inverter", "object", "inverter")
feeder.attachRecorders(tree, "Windmill", "object", "windturb_dg")
feeder.attachRecorders(tree, "CollectorVoltage", None, None)
feeder.attachRecorders(tree, "Climate", "object", "climate")
feeder.attachRecorders(tree, "OverheadLosses", None, None)
feeder.attachRecorders(tree, "UndergroundLosses", None, None)
feeder.attachRecorders(tree, "TriplexLosses", None, None)
feeder.attachRecorders(tree, "TransformerLosses", None, None)
feeder.groupSwingKids(tree)
# Attach recorders for system voltage map:
stub = {'object': 'group_recorder', 'group': '"class=node"',
'property': 'voltage_A', 'interval': 3600, 'file': 'aVoltDump.csv'}
for phase in ['A', 'B', 'C']:
copyStub = dict(stub)
copyStub['property'] = 'voltage_' + phase
copyStub['file'] = phase.lower() + 'VoltDump.csv'
tree[feeder.getMaxKey(tree) + 1] = copyStub
feeder.adjustTime(tree=tree, simLength=float(inputDict["simLength"]),
simLengthUnits=inputDict["simLengthUnits"], simStartDate=inputDict["simStartDate"])
# RUN GRIDLABD IN FILESYSTEM (EXPENSIVE!)
rawOut = gridlabd.runInFilesystem(tree, attachments=feederJson["attachments"],
keepFiles=True, workDir=pJoin(modelDir, 'gldContainer'))
cleanOut = {}
# Std Err and Std Out
cleanOut['stderr'] = rawOut['stderr']
cleanOut['stdout'] = rawOut['stdout']
# Time Stamps
for key in rawOut:
if '# timestamp' in rawOut[key]:
cleanOut['timeStamps'] = rawOut[key]['# timestamp']
break
elif '# property.. timestamp' in rawOut[key]:
cleanOut['timeStamps'] = rawOut[key]['# property.. timestamp']
else:
cleanOut['timeStamps'] = []
# Day/Month Aggregation Setup:
stamps = cleanOut.get('timeStamps', [])
level = inputDict.get('simLengthUnits', 'hours')
# Climate
for key in rawOut:
if key.startswith('Climate_') and key.endswith('.csv'):
cleanOut['climate'] = {}
cleanOut['climate'][
'Rain Fall (in/h)'] = hdmAgg(rawOut[key].get('rainfall'), sum, level, stamps)
cleanOut['climate'][
'Wind Speed (m/s)'] = hdmAgg(rawOut[key].get('wind_speed'), avg, level, stamps)
cleanOut['climate']['Temperature (F)'] = hdmAgg(
rawOut[key].get('temperature'), max, level, stamps)
cleanOut['climate']['Snow Depth (in)'] = hdmAgg(
rawOut[key].get('snowdepth'), max, level, stamps)
cleanOut['climate'][
'Direct Normal (W/sf)'] = hdmAgg(rawOut[key].get('solar_direct'), sum, level, stamps)
#cleanOut['climate']['Global Horizontal (W/sf)'] = hdmAgg(rawOut[key].get('solar_global'), sum, level)
climateWbySFList = hdmAgg(
rawOut[key].get('solar_global'), sum, level, stamps)
# converting W/sf to W/sm
climateWbySMList = [x * 10.76392 for x in climateWbySFList]
cleanOut['climate'][
'Global Horizontal (W/sm)'] = climateWbySMList
# Voltage Band
if 'VoltageJiggle.csv' in rawOut:
cleanOut['allMeterVoltages'] = {}
cleanOut['allMeterVoltages']['Min'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['min(voltage_12.mag)']], min, level, stamps)
cleanOut['allMeterVoltages']['Mean'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['mean(voltage_12.mag)']], avg, level, stamps)
cleanOut['allMeterVoltages']['StdDev'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['std(voltage_12.mag)']], avg, level, stamps)
cleanOut['allMeterVoltages']['Max'] = hdmAgg(
[float(i / 2) for i in rawOut['VoltageJiggle.csv']['max(voltage_12.mag)']], max, level, stamps)
# Power Consumption
cleanOut['Consumption'] = {}
# Set default value to be 0, avoiding missing value when computing
# Loads
cleanOut['Consumption']['Power'] = [0] * int(inputDict["simLength"])
cleanOut['Consumption']['Losses'] = [0] * int(inputDict["simLength"])
cleanOut['Consumption']['DG'] = [0] * int(inputDict["simLength"])
for key in rawOut:
if key.startswith('SwingKids_') and key.endswith('.csv'):
oneSwingPower = hdmAgg(vecPyth(
rawOut[key]['sum(power_in.real)'], rawOut[key]['sum(power_in.imag)']), avg, level, stamps)
if 'Power' not in cleanOut['Consumption']:
cleanOut['Consumption']['Power'] = oneSwingPower
else:
cleanOut['Consumption']['Power'] = vecSum(
oneSwingPower, cleanOut['Consumption']['Power'])
elif key.startswith('Inverter_') and key.endswith('.csv'):
realA = rawOut[key]['power_A.real']
realB = rawOut[key]['power_B.real']
realC = rawOut[key]['power_C.real']
imagA = rawOut[key]['power_A.imag']
imagB = rawOut[key]['power_B.imag']
imagC = rawOut[key]['power_C.imag']
oneDgPower = hdmAgg(vecSum(vecPyth(realA, imagA), vecPyth(
realB, imagB), vecPyth(realC, imagC)), avg, level, stamps)
if 'DG' not in cleanOut['Consumption']:
cleanOut['Consumption']['DG'] = oneDgPower
else:
cleanOut['Consumption']['DG'] = vecSum(
oneDgPower, cleanOut['Consumption']['DG'])
elif key.startswith('Windmill_') and key.endswith('.csv'):
vrA = rawOut[key]['voltage_A.real']
vrB = rawOut[key]['voltage_B.real']
vrC = rawOut[key]['voltage_C.real']
viA = rawOut[key]['voltage_A.imag']
viB = rawOut[key]['voltage_B.imag']
viC = rawOut[key]['voltage_C.imag']
crB = rawOut[key]['current_B.real']
crA = rawOut[key]['current_A.real']
crC = rawOut[key]['current_C.real']
ciA = rawOut[key]['current_A.imag']
ciB = rawOut[key]['current_B.imag']
ciC = rawOut[key]['current_C.imag']
powerA = vecProd(vecPyth(vrA, viA), vecPyth(crA, ciA))
powerB = vecProd(vecPyth(vrB, viB), vecPyth(crB, ciB))
powerC = vecProd(vecPyth(vrC, viC), vecPyth(crC, ciC))
oneDgPower = hdmAgg(vecSum(powerA, powerB, powerC), avg, level, stamps)
if 'DG' not in cleanOut['Consumption']:
cleanOut['Consumption']['DG'] = oneDgPower
else:
cleanOut['Consumption']['DG'] = vecSum(
oneDgPower, cleanOut['Consumption']['DG'])
elif key in ['OverheadLosses.csv', 'UndergroundLosses.csv', 'TriplexLosses.csv', 'TransformerLosses.csv']:
realA = rawOut[key]['sum(power_losses_A.real)']
imagA = rawOut[key]['sum(power_losses_A.imag)']
realB = rawOut[key]['sum(power_losses_B.real)']
imagB = rawOut[key]['sum(power_losses_B.imag)']
realC = rawOut[key]['sum(power_losses_C.real)']
imagC = rawOut[key]['sum(power_losses_C.imag)']
oneLoss = hdmAgg(vecSum(vecPyth(realA, imagA), vecPyth(
realB, imagB), vecPyth(realC, imagC)), avg, level, stamps)
if 'Losses' not in cleanOut['Consumption']:
cleanOut['Consumption']['Losses'] = oneLoss
else:
cleanOut['Consumption']['Losses'] = vecSum(
oneLoss, cleanOut['Consumption']['Losses'])
elif key.startswith('Regulator_') and key.endswith('.csv'):
# Strip off .csv and use the rest of the file name as the key.
# For example, Regulator_VR10.csv -> key would be Regulator_VR10.
newkey = key.split(".")[0]
cleanOut[newkey] = {}
cleanOut[newkey]['RegTapA'] = [0] * int(inputDict["simLength"])
cleanOut[newkey]['RegTapB'] = [0] * int(inputDict["simLength"])
cleanOut[newkey]['RegTapC'] = [0] * int(inputDict["simLength"])
cleanOut[newkey]['RegTapA'] = rawOut[key]['tap_A']
cleanOut[newkey]['RegTapB'] = rawOut[key]['tap_B']
cleanOut[newkey]['RegTapC'] = rawOut[key]['tap_C']
cleanOut[newkey]['RegPhases'] = rawOut[key]['phases'][0]
elif key.startswith('Capacitor_') and key.endswith('.csv'):
newkey = key.split(".")[0]
cleanOut[newkey] = {}
cleanOut[newkey]['Cap1A'] = [0] * int(inputDict["simLength"])
cleanOut[newkey]['Cap1B'] = [0] * int(inputDict["simLength"])
cleanOut[newkey]['Cap1C'] = [0] * int(inputDict["simLength"])
cleanOut[newkey]['Cap1A'] = rawOut[key]['switchA']
cleanOut[newkey]['Cap1B'] = rawOut[key]['switchB']
cleanOut[newkey]['Cap1C'] = rawOut[key]['switchC']
cleanOut[newkey]['CapPhases'] = rawOut[key]['phases'][0]
# What percentage of our keys have lat lon data?
latKeys = [tree[key]['latitude']
for key in tree if 'latitude' in tree[key]]
latPerc = 1.0 * len(latKeys) / len(tree)
doNeato = latPerc < 0.25
# Generate the frames for the system voltage map time traveling chart.
genTime = generateVoltChart(
tree, rawOut, modelDir, neatoLayout=doNeato)
cleanOut['genTime'] = genTime
# Aggregate up the timestamps:
if level == 'days':
cleanOut['timeStamps'] = aggSeries(
stamps, stamps, lambda x: x[0][0:10], 'days')
elif level == 'months':
cleanOut['timeStamps'] = aggSeries(
stamps, stamps, lambda x: x[0][0:7], 'months')
# Write the output.
fs.save(pJoin(modelDir, "allOutputData.json"), json.dumps(cleanOut, indent=4))
# Update the runTime in the input file.
endTime = datetime.datetime.now()
inputDict["runTime"] = str(
datetime.timedelta(seconds=int((endTime - startTime).total_seconds())))
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
# Clean up the PID file.
fs.remove(pJoin(modelDir, "gldContainer", "PID.txt"))
print "DONE RUNNING", modelDir
except Exception as e:
print "MODEL CRASHED", e
# Cancel to get rid of extra background processes.
try:
fs.remove(pJoin(modelDir, 'PPID.txt'))
except:
pass
thisErr = traceback.format_exc()
inputDict['stderr'] = thisErr
with open(os.path.join(modelDir, 'stderr.txt'), 'w') as errorFile:
errorFile.write(thisErr)
# Dump input with error included.
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
finishTime = datetime.datetime.now()
inputDict["runTime"] = str(
datetime.timedelta(seconds=int((finishTime - beginTime).total_seconds())))
fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inputDict, indent=4))
try:
fs.remove(pJoin(modelDir, "PPID.txt"))
except:
pass
def generateVoltChart(tree, rawOut, modelDir, neatoLayout=True):
''' Map the voltages on a feeder over time using a movie.'''
# We timestamp the frame filenames with the system clock so the browser
# doesn't serve a stale cached copy.
genTime = str(datetime.datetime.now()).replace(':', '.')
# Detect the feeder nominal voltage:
for key in tree:
ob = tree[key]
if type(ob) == dict and ob.get('bustype', '') == 'SWING':
feedVoltage = float(ob.get('nominal_voltage', 1))
# Make a graph object.
fGraph = feeder.treeToNxGraph(tree)
if neatoLayout:
# HACK: work on a new graph without attributes because graphViz tries
# to read attrs.
cleanG = nx.Graph(fGraph.edges())
cleanG.add_nodes_from(fGraph)
positions = nx.graphviz_layout(cleanG, prog='neato')
else:
rawPositions = {n: fGraph.node[n].get('pos', (0, 0)) for n in fGraph}
# HACK: the import code reverses the y coords.
def yFlip(pair):
try:
return (pair[0], -1.0 * pair[1])
except:
return (0, 0)
positions = {k: yFlip(rawPositions[k]) for k in rawPositions}
# Plot all time steps.
nodeVolts = {}
for step, stamp in enumerate(rawOut['aVoltDump.csv']['# timestamp']):
# Build voltage map.
nodeVolts[step] = {}
for nodeName in [x for x in rawOut['aVoltDump.csv'].keys() if x != '# timestamp']:
allVolts = []
for phase in ['a', 'b', 'c']:
voltStep = rawOut[phase + 'VoltDump.csv'][nodeName][step]
# HACK: Gridlab complex number format sometimes uses i,
# sometimes j, sometimes d. WTF?
if type(voltStep) is str:
voltStep = voltStep.replace('i', 'j')
v = complex(voltStep)
phaseVolt = abs(v)
if phaseVolt != 0.0:
if _digits(phaseVolt) > 3:
# Normalize to 120 V standard
phaseVolt = phaseVolt * (120 / feedVoltage)
allVolts.append(phaseVolt)
# HACK: Take average of all phases to collapse dimensionality.
nodeVolts[step][nodeName] = avg(allVolts)
# Draw animation.
voltChart = plt.figure(figsize=(10, 10))
plt.axes(frameon=0)
plt.axis('off')
voltChart.subplots_adjust(
left=0.03, bottom=0.03, right=0.97, top=0.97, wspace=None, hspace=None)
custom_cm = matplotlib.colors.LinearSegmentedColormap.from_list(
'custColMap', [(0.0, 'blue'), (0.25, 'darkgray'), (0.75, 'darkgray'), (1.0, 'yellow')])
edgeIm = nx.draw_networkx_edges(fGraph, positions)
nodeIm = nx.draw_networkx_nodes(fGraph,
pos=positions,
node_color=[
nodeVolts[0].get(n, 0) for n in fGraph.nodes()],
linewidths=0,
node_size=30,
cmap=custom_cm)
plt.sci(nodeIm)
plt.clim(110, 130)
plt.colorbar()
plt.title(rawOut['aVoltDump.csv']['# timestamp'][0])
def update(step):
nodeColors = np.array([nodeVolts[step].get(n, 0)
for n in fGraph.nodes()])
plt.title(rawOut['aVoltDump.csv']['# timestamp'][step])
nodeIm.set_array(nodeColors)
return nodeColors,
anim = FuncAnimation(voltChart, update, frames=len(
rawOut['aVoltDump.csv']['# timestamp']), interval=200, blit=False)
anim.save(pJoin(modelDir, 'voltageChart.mp4'),
codec='h264', extra_args=['-pix_fmt', 'yuv420p'])
# Reclaim memory by closing, deleting and garbage collecting the last
# chart.
voltChart.clf()
plt.close()
del voltChart
gc.collect()
return genTime
def avg(inList):
''' Average a list. Really wish this was built-in. '''
return sum(inList) / float(len(inList))
def hdmAgg(series, func, level, stamps):
''' Simple hour/day/month aggregation for Gridlab. '''
if level in ['days', 'months']:
return aggSeries(stamps, series, func, level)
else:
return series
def aggSeries(timeStamps, timeSeries, func, level):
''' Aggregate a list + timeStamps up to the required time level. '''
# Different substring depending on what level we aggregate to:
if level == 'months':
endPos = 7
elif level == 'days':
endPos = 10
combo = zip(timeStamps, timeSeries)
# Group by level:
groupedCombo = _groupBy(
combo, lambda x1, x2: x1[0][0:endPos] == x2[0][0:endPos])
# Get rid of the timestamps:
groupedRaw = [[pair[1] for pair in group] for group in groupedCombo]
return map(func, groupedRaw)
def _pyth(x, y):
''' Compute the hypotenuse (third side of a right triangle)--BUT KEEP SIGNS THE SAME FOR DG. '''
sign = lambda z: (-1 if z < 0 else 1)
fullSign = sign(sign(x) * x * x + sign(y) * y * y)
return fullSign * math.sqrt(x * x + y * y)
def _digits(x):
''' Returns number of digits before the decimal in the float x. '''
return math.ceil(math.log10(x + 1))
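# Illustrative sketch (added here, never called by the model): _digits drives
# the primary-voltage test in generateVoltChart above -- any phase magnitude
# with more than 3 digits before the decimal gets rescaled to the 120 V base.
def _demoDigits():
    ''' Tiny self-check of _digits with made-up voltages. '''
    assert _digits(120.0) == 3
    assert _digits(7200.0) == 4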
def vecPyth(vx, vy):
''' Pythagorean theorem for pairwise elements from two vectors. '''
rows = zip(vx, vy)
return map(lambda x: _pyth(*x), rows)
def vecSum(*args):
''' Add n vectors. '''
return map(sum, zip(*args))
def _prod(inList):
''' Product of all values in a list. '''
return reduce(lambda x, y: x * y, inList, 1)
def vecProd(*args):
''' Multiply n vectors. '''
return map(_prod, zip(*args))
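# Illustrative sketch (added here, never called by the model): how the vector
# helpers combine real/imaginary component lists into signed magnitudes, the
# same way the swing, inverter, and loss tallies in heavyProcessing use them.
def _demoVecMath():
    ''' Tiny self-check of _pyth, vecPyth and vecSum with made-up numbers. '''
    assert _pyth(3, 4) == 5.0
    assert _pyth(-3, -4) == -5.0  # sign is preserved, per the DG note above
    assert list(vecPyth([3, -3], [4, -4])) == [5.0, -5.0]
    assert list(vecSum([1, 2], [10, 20], [100, 200])) == [111, 222]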
def threePhasePowFac(ra, rb, rc, ia, ib, ic):
''' Get power factor for a row of threephase volts and amps. Gridlab-specific. '''
pfRow = lambda row: math.cos(
math.atan((row[0] + row[1] + row[2]) / (row[3] + row[4] + row[5])))
rows = zip(ra, rb, rc, ia, ib, ic)
return map(pfRow, rows)
def roundSeries(ser):
''' Round everything in a vector to 4 sig figs. '''
return map(lambda x: roundSig(x, 4), ser)
def _groupBy(inL, func):
''' Take a list and func, and group items in place comparing with func. Make sure the func is an equivalence relation, or your brain will hurt. '''
if inL == []:
return inL
if len(inL) == 1:
return [inL]
newL = [[inL[0]]]
for item in inL[1:]:
if func(item, newL[-1][0]):
newL[-1].append(item)
else:
newL.append([item])
return newL
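# Illustrative sketch (added here, never called by the model): aggSeries
# groups a series by the date prefix of its timestamps, which is how hourly
# GridLAB-D output collapses to daily or monthly values. Timestamps are made up.
def _demoAggSeries():
    ''' Tiny self-check of hdmAgg/aggSeries with two fake days of data. '''
    stamps = ['2012-04-01 00:00', '2012-04-01 01:00', '2012-04-02 00:00']
    series = [1, 2, 3]
    assert list(aggSeries(stamps, series, sum, 'days')) == [3, 3]
    assert list(hdmAgg(series, sum, 'hours', stamps)) == [1, 2, 3]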
def _tests():
# Variables
from .. import filesystem
fs = filesystem.Filesystem().fs
inData = {"simStartDate": "2012-04-01",
"simLengthUnits": "hours",
"feederName": "public___Olin Barre GH EOL Solar",
"modelType": "solarEngineering",
"zipCode": "64735",
"simLength": "24",
"runTime": ""}
modelLoc = pJoin(__metaModel__._omfDir, "data", "Model",
"admin", "Automated solarEngineering Test")
# Blow away old test results if necessary.
try:
shutil.rmtree(modelLoc)
except:
# No previous test results.
pass
# No-input template.
# renderAndShow(template)
# Run the model.
runForeground(modelLoc, fs, inData)
# Cancel the model.
# time.sleep(2)
# cancel(modelLoc)
# Show the output.
renderAndShow(template, fs, modelDir=modelLoc)
# Delete the model.
# shutil.rmtree(modelLoc)
if __name__ == '__main__':
_tests()
| gpl-2.0 |
tamasgal/km3pipe | km3pipe/style/__init__.py | 1 | 2367 | # Filename: style.py
# pylint: disable=locally-disabled
"""
The KM3Pipe style definitions.
"""
from os.path import dirname, join, exists
from itertools import cycle
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "[email protected]"
__status__ = "Development"
STYLE_DIR = join(dirname(dirname(__file__)), "stylelib")
def get_style_path(style):
return STYLE_DIR + "/" + style + ".mplstyle"
def use(style="km3pipe"):
import matplotlib.pyplot as plt
for s in (get_style_path("km3pipe-" + style), get_style_path(style), style):
if exists(s):
plt.style.use(s)
return
print("Could not find style: '{0}'".format(style))
class ColourCycler(object):
"""Basic colour cycler.
Instantiate with `cc = ColourCycler()` and use it in plots
like `plt.plot(xs, ys, c=next(cc))`.
"""
def __init__(self, palette="km3pipe"):
self.colours = {}
self.refresh_styles()
self.choose(palette)
def choose(self, palette):
"""Pick a palette"""
try:
self._cycler = cycle(self.colours[palette])
except KeyError:
raise KeyError(
"Chose one of the following colour palettes: {0}".format(self.available)
)
def refresh_styles(self):
"""Load all available styles"""
import matplotlib.pyplot as plt
self.colours = {}
for style in plt.style.available:
try:
style_colours = plt.style.library[style]["axes.prop_cycle"]
self.colours[style] = [c["color"] for c in list(style_colours)]
except KeyError:
continue
self.colours["km3pipe"] = [
"#ff7869",
"#4babe1",
"#96ad3e",
"#e4823d",
"#5d72b2",
"#e2a3c2",
"#fd9844",
"#e480e7",
]
@property
def available(self):
"""Return a list of available styles"""
return list(self.colours.keys())
def __next__(self):
"""Return the next colour in current palette"""
return next(self._cycler)
def next(self):
"""Python 2 compatibility for iterators"""
return self.__next__()
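if __name__ == "__main__":
    # Minimal usage sketch (an illustration, not part of the module API):
    # activate the bundled style if it can be found, then cycle a few
    # colours from the default palette.
    use("km3pipe")
    cc = ColourCycler()
    print("Available palettes: {0}".format(cc.available))
    print("First three colours: {0}".format([next(cc) for _ in range(3)]))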
| mit |
kambysese/mne-python | tutorials/stats-source-space/plot_stats_cluster_spatio_temporal_repeated_measures_anova.py | 18 | 12326 | """
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will test whether the differences in evoked responses between
stimulation modality (visual vs. auditory) depend on the stimulus
location (left vs. right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep * 1000 # convert to milliseconds
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
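# Quick shape check (an illustrative addition, not in the original tutorial):
# X is now vertices x times x subjects x conditions for the 'sample' space.
print('Simulated data shape (vertices, times, subjects, conditions): %s'
      % (X.shape,))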
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 ICO source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately, but here since all estimates are on
# 'sample' we can use one morph matrix for all the heavy lifting.
# Read the source space we are morphing to (just left hemisphere)
src = mne.read_source_spaces(src_fname)
fsave_vertices = [src[0]['vertno'], []]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat
morph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3])  # subjects x time x space x conditions
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
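# Illustrative sanity check (an addition, not in the original tutorial): each
# condition should now be subjects x times x vertices, the layout expected by
# the clustering function below.
for x in X:
    assert x.shape == (n_subjects, n_times, n_vertices_fsave)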
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument, which is a list
# of the number of factor levels for each factor.
factor_levels = [2, 2]
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language).
# As an aside, note that in this particular example, we cannot use the A*B
# notation, which returns both the main and the interaction effects. The reason
# is that the clustering function expects ``stat_fun`` to return a 1-D array.
# To get clusters for both, you must create a loop.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A ``stat_fun`` must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# .. note:: For further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut-timefreq-twoway-anova>`.
def stat_fun(*args):
# get f-values only.
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
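# Illustrative self-check (an addition, not in the original tutorial): the
# clustering routine expects ``stat_fun`` to return one F-value per
# observation, so probe it with a small block of random data.
_rng = np.random.RandomState(0)
_toy = [_rng.randn(n_subjects, 10) for _ in range(n_conditions)]
print('stat_fun output shape on toy data:', stat_fun(*_toy).shape)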
###############################################################################
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial adjacency matrix (instead of spatio-temporal).
# as we only have one hemisphere we only need half the adjacency
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])
# Now let's actually do the clustering. Please relax: on a small
# notebook with only a single thread this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',
time_label='temporal extent (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses:
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
| bsd-3-clause |
arokem/seaborn | seaborn/relational.py | 2 | 37384 | import warnings
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import (
VectorPlotter,
)
from .utils import (
locator_to_legend_entries,
adjust_legend_subtitles,
_default_color,
_deprecate_ci,
)
from ._statistics import EstimateAggregator
from .axisgrid import FacetGrid, _facet_docs
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["relplot", "scatterplot", "lineplot"]
_relational_narrative = DocstringComponents(dict(
# --- Introductory prose
main_api="""
The relationship between ``x`` and ``y`` can be shown for different subsets
of the data using the ``hue``, ``size``, and ``style`` parameters. These
parameters control what visual semantics are used to identify the different
subsets. It is possible to show up to three dimensions independently by
using all three semantic types, but this style of plot can be hard to
interpret and is often ineffective. Using redundant semantics (i.e. both
``hue`` and ``style`` for the same variable) can be helpful for making
graphics more accessible.
See the :ref:`tutorial <relational_tutorial>` for more information.
""",
relational_semantic="""
The default treatment of the ``hue`` (and to a lesser extent, ``size``)
semantic, if present, depends on whether the variable is inferred to
represent "numeric" or "categorical" data. In particular, numeric variables
are represented with a sequential colormap by default, and the legend
entries show regular "ticks" with values that may or may not exist in the
data. This behavior can be controlled through various parameters, as
described and illustrated below.
""",
))
_relational_docs = dict(
# --- Shared function parameters
data_vars="""
x, y : names of variables in ``data`` or vector data
Input data variables; must be numeric. Can pass data directly or
reference columns in ``data``.
""",
data="""
data : DataFrame, array, or list of arrays
Input data structure. If ``x`` and ``y`` are specified as names, this
should be a "long-form" DataFrame containing those columns. Otherwise
it is treated as "wide-form" data and grouping variables are ignored.
See the examples for the various ways this parameter can be specified
and the different effects of each.
""",
palette="""
palette : string, list, dict, or matplotlib colormap
An object that determines how colors are chosen when ``hue`` is used.
It can be the name of a seaborn palette or matplotlib colormap, a list
of colors (anything matplotlib understands), a dict mapping levels
of the ``hue`` variable to colors, or a matplotlib colormap object.
""",
hue_order="""
hue_order : list
Specified order for the appearance of the ``hue`` variable levels,
otherwise they are determined from the data. Not relevant when the
``hue`` variable is numeric.
""",
hue_norm="""
hue_norm : tuple or :class:`matplotlib.colors.Normalize` object
Normalization in data units for colormap applied to the ``hue``
variable when it is numeric. Not relevant if it is categorical.
""",
sizes="""
sizes : list, dict, or tuple
An object that determines how sizes are chosen when ``size`` is used.
It can always be a list of size values or a dict mapping levels of the
``size`` variable to sizes. When ``size`` is numeric, it can also be
a tuple specifying the minimum and maximum size to use such that other
values are normalized within this range.
""",
size_order="""
size_order : list
Specified order for appearance of the ``size`` variable levels,
otherwise they are determined from the data. Not relevant when the
``size`` variable is numeric.
""",
size_norm="""
size_norm : tuple or Normalize object
Normalization in data units for scaling plot objects when the
``size`` variable is numeric.
""",
dashes="""
dashes : boolean, list, or dictionary
Object determining how to draw the lines for different levels of the
``style`` variable. Setting to ``True`` will use default dash codes, or
you can pass a list of dash codes or a dictionary mapping levels of the
``style`` variable to dash codes. Setting to ``False`` will use solid
lines for all subsets. Dashes are specified as in matplotlib: a tuple
of ``(segment, gap)`` lengths, or an empty string to draw a solid line.
""",
markers="""
markers : boolean, list, or dictionary
Object determining how to draw the markers for different levels of the
``style`` variable. Setting to ``True`` will use default markers, or
you can pass a list of markers or a dictionary mapping levels of the
``style`` variable to markers. Setting to ``False`` will draw
marker-less lines. Markers are specified as in matplotlib.
""",
style_order="""
style_order : list
Specified order for appearance of the ``style`` variable levels
otherwise they are determined from the data. Not relevant when the
``style`` variable is numeric.
""",
units="""
units : vector or key in ``data``
Grouping variable identifying sampling units. When used, a separate
line will be drawn for each unit with appropriate semantics, but no
legend entry will be added. Useful for showing distribution of
experimental replicates when exact identities are not needed.
""",
estimator="""
estimator : name of pandas method or callable or None
Method for aggregating across multiple observations of the ``y``
variable at the same ``x`` level. If ``None``, all observations will
be drawn.
""",
ci="""
ci : int or "sd" or None
Size of the confidence interval to draw when aggregating.
.. deprecated:: 0.12.0
Use the new `errorbar` parameter for more flexibility.
""",
n_boot="""
n_boot : int
Number of bootstraps to use for computing the confidence interval.
""",
seed="""
seed : int, numpy.random.Generator, or numpy.random.RandomState
Seed or random number generator for reproducible bootstrapping.
""",
legend="""
legend : "auto", "brief", "full", or False
How to draw the legend. If "brief", numeric ``hue`` and ``size``
variables will be represented with a sample of evenly spaced values.
If "full", every group will get an entry in the legend. If "auto",
choose between brief or full representation based on number of levels.
If ``False``, no legend data is added and no legend is drawn.
""",
ax_in="""
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses the current Axes.
""",
ax_out="""
ax : matplotlib Axes
Returns the Axes object with the plot drawn onto it.
""",
)
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
facets=DocstringComponents(_facet_docs),
rel=DocstringComponents(_relational_docs),
stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),
)
class _RelationalPlotter(VectorPlotter):
wide_structure = {
"x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
}
# TODO where best to define default parameters?
sort = True
def add_legend_data(self, ax):
"""Add labeled artists to represent the different plot semantics."""
verbosity = self.legend
if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
raise ValueError(err)
elif verbosity is True:
verbosity = "auto"
legend_kwargs = {}
keys = []
# Assign a legend title if there is only going to be one sub-legend,
# otherwise, subtitles will be inserted into the texts list with an
# invisible handle (which is a hack)
titles = {
title for title in
(self.variables.get(v, None) for v in ["hue", "size", "style"])
if title is not None
}
if len(titles) == 1:
legend_title = titles.pop()
else:
legend_title = ""
title_kws = dict(
visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
)
def update(var_name, val_name, **kws):
key = var_name, val_name
if key in legend_kwargs:
legend_kwargs[key].update(**kws)
else:
keys.append(key)
legend_kwargs[key] = dict(**kws)
# Define the maximum number of ticks to use for "brief" legends
brief_ticks = 6
# -- Add a legend for hue semantics
brief_hue = self._hue_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._hue_map.levels) > brief_ticks)
)
if brief_hue:
if isinstance(self._hue_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
limits = min(self._hue_map.levels), max(self._hue_map.levels)
hue_levels, hue_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["hue"].infer_objects().dtype
)
elif self._hue_map.levels is None:
hue_levels = hue_formatted_levels = []
else:
hue_levels = hue_formatted_levels = self._hue_map.levels
# Add the hue semantic subtitle
if not legend_title and self.variables.get("hue", None) is not None:
update((self.variables["hue"], "title"),
self.variables["hue"], **title_kws)
# Add the hue semantic labels
for level, formatted_level in zip(hue_levels, hue_formatted_levels):
if level is not None:
color = self._hue_map(level)
update(self.variables["hue"], formatted_level, color=color)
# -- Add a legend for size semantics
brief_size = self._size_map.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(self._size_map.levels) > brief_ticks)
)
if brief_size:
# Define how ticks will interpolate between the min/max data values
if isinstance(self._size_map.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
# Define the min/max data values
limits = min(self._size_map.levels), max(self._size_map.levels)
size_levels, size_formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data["size"].infer_objects().dtype
)
elif self._size_map.levels is None:
size_levels = size_formatted_levels = []
else:
size_levels = size_formatted_levels = self._size_map.levels
# Add the size semantic subtitle
if not legend_title and self.variables.get("size", None) is not None:
update((self.variables["size"], "title"),
self.variables["size"], **title_kws)
# Add the size semantic labels
for level, formatted_level in zip(size_levels, size_formatted_levels):
if level is not None:
size = self._size_map(level)
update(
self.variables["size"],
formatted_level,
linewidth=size,
s=size,
)
# -- Add a legend for style semantics
# Add the style semantic title
if not legend_title and self.variables.get("style", None) is not None:
update((self.variables["style"], "title"),
self.variables["style"], **title_kws)
# Add the style semantic labels
if self._style_map.levels is not None:
for level in self._style_map.levels:
if level is not None:
attrs = self._style_map(level)
update(
self.variables["style"],
level,
marker=attrs.get("marker", ""),
dashes=attrs.get("dashes", ""),
)
func = getattr(ax, self._legend_func)
legend_data = {}
legend_order = []
for key in keys:
_, label = key
kws = legend_kwargs[key]
kws.setdefault("color", ".2")
use_kws = {}
for attr in self._legend_attributes + ["visible"]:
if attr in kws:
use_kws[attr] = kws[attr]
artist = func([], [], label=label, **use_kws)
if self._legend_func == "plot":
artist = artist[0]
legend_data[key] = artist
legend_order.append(key)
self.legend_title = legend_title
self.legend_data = legend_data
self.legend_order = legend_order
class _LinePlotter(_RelationalPlotter):
_legend_attributes = ["color", "linewidth", "marker", "dashes"]
_legend_func = "plot"
def __init__(
self, *,
data=None, variables={},
estimator=None, ci=None, n_boot=None, seed=None,
sort=True, err_style=None, err_kws=None, legend=None,
errorbar=None,
):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * mpl.rcParams["lines.linewidth"]
)
super().__init__(data=data, variables=variables)
self.estimator = estimator
self.errorbar = errorbar
self.ci = ci
self.n_boot = n_boot
self.seed = seed
self.sort = sort
self.err_style = err_style
self.err_kws = {} if err_kws is None else err_kws
self.legend = legend
def plot(self, ax, kws):
"""Draw the plot onto an axes, passing matplotlib kwargs."""
# Draw a test plot, using the passed in kwargs. The goal here is to
# honor both (a) the current state of the plot cycler and (b) the
# specified kwargs on all the lines we will draw, overriding when
# relevant with the data semantics. Note that we won't cycle
# internally; in other words, if ``hue`` is not used, all elements will
# have the same color, but they will have the color that you would have
# gotten from the corresponding matplotlib function, and calling the
# function will advance the axes property cycle.
kws.setdefault("markeredgewidth", kws.pop("mew", .75))
kws.setdefault("markeredgecolor", kws.pop("mec", "w"))
# Set default error kwargs
err_kws = self.err_kws.copy()
if self.err_style == "band":
err_kws.setdefault("alpha", .2)
elif self.err_style == "bars":
pass
elif self.err_style is not None:
err = "`err_style` must be 'band' or 'bars', not {}"
raise ValueError(err.format(self.err_style))
# Initialize the aggregation object
agg = EstimateAggregator(
self.estimator, self.errorbar, n_boot=self.n_boot, seed=self.seed,
)
# TODO abstract variable to aggregate over here-ish. Better name?
agg_var = "y"
grouper = ["x"]
# TODO How to handle NA? We don't want NA to propagate through to the
# estimate/CI when some values are present, but we would also like
# matplotlib to show "gaps" in the line when all values are missing.
# This is straightforward absent aggregation, but complicated with it.
# If we want to use nas, we need to conditionalize dropna in iter_data.
# Loop over the semantic subsets and add to the plot
grouping_vars = "hue", "size", "style"
for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):
if self.sort:
sort_vars = ["units", "x", "y"]
sort_cols = [var for var in sort_vars if var in self.variables]
sub_data = sub_data.sort_values(sort_cols)
if self.estimator is not None:
if "units" in self.variables:
# TODO eventually relax this constraint
err = "estimator must be None when specifying units"
raise ValueError(err)
grouped = sub_data.groupby(grouper, sort=self.sort)
# Could pass as_index=False instead of reset_index,
# but that fails on a corner case with older pandas.
sub_data = grouped.apply(agg, agg_var).reset_index()
# TODO this is pretty ad hoc ; see GH2409
for var in "xy":
if self._log_scaled(var):
for col in sub_data.filter(regex=f"^{var}"):
sub_data[col] = np.power(10, sub_data[col])
# --- Draw the main line(s)
if "units" in self.variables: # XXX why not add to grouping variables?
lines = []
for _, unit_data in sub_data.groupby("units"):
lines.extend(ax.plot(unit_data["x"], unit_data["y"], **kws))
else:
lines = ax.plot(sub_data["x"], sub_data["y"], **kws)
for line in lines:
if "hue" in sub_vars:
line.set_color(self._hue_map(sub_vars["hue"]))
if "size" in sub_vars:
line.set_linewidth(self._size_map(sub_vars["size"]))
if "style" in sub_vars:
attributes = self._style_map(sub_vars["style"])
if "dashes" in attributes:
line.set_dashes(attributes["dashes"])
if "marker" in attributes:
line.set_marker(attributes["marker"])
line_color = line.get_color()
line_alpha = line.get_alpha()
line_capstyle = line.get_solid_capstyle()
# --- Draw the confidence intervals
if self.estimator is not None and self.errorbar is not None:
# TODO handling of orientation will need to happen here
if self.err_style == "band":
ax.fill_between(
sub_data["x"], sub_data["ymin"], sub_data["ymax"],
color=line_color, **err_kws
)
elif self.err_style == "bars":
error_deltas = (
sub_data["y"] - sub_data["ymin"],
sub_data["ymax"] - sub_data["y"],
)
ebars = ax.errorbar(
sub_data["x"], sub_data["y"], error_deltas,
linestyle="", color=line_color, alpha=line_alpha,
**err_kws
)
# Set the capstyle properly on the error bars
for obj in ebars.get_children():
if isinstance(obj, mpl.collections.LineCollection):
obj.set_capstyle(line_capstyle)
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
class _ScatterPlotter(_RelationalPlotter):
_legend_attributes = ["color", "s", "marker"]
_legend_func = "scatter"
def __init__(
self, *,
data=None, variables={},
x_bins=None, y_bins=None,
estimator=None, ci=None, n_boot=None,
alpha=None, x_jitter=None, y_jitter=None,
legend=None
):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
)
super().__init__(data=data, variables=variables)
self.alpha = alpha
self.legend = legend
def plot(self, ax, kws):
# --- Determine the visual attributes of the plot
data = self.plot_data.dropna()
if data.empty:
return
# Define the vectors of x and y positions
empty = np.full(len(data), np.nan)
x = data.get("x", empty)
y = data.get("y", empty)
# Set defaults for other visual attributes
kws.setdefault("edgecolor", "w")
if "style" in self.variables:
# Use a representative marker so scatter sets the edgecolor
# properly for line art markers. We currently enforce either
# all or none line art so this works.
example_level = self._style_map.levels[0]
example_marker = self._style_map(example_level, "marker")
kws.setdefault("marker", example_marker)
# TODO this makes it impossible to vary alpha with hue which might
# otherwise be useful? Should we just pass None?
kws["alpha"] = 1 if self.alpha == "auto" else self.alpha
# Draw the scatter plot
points = ax.scatter(x=x, y=y, **kws)
# Apply the mapping from semantic variables to artist attributes
if "hue" in self.variables:
points.set_facecolors(self._hue_map(data["hue"]))
if "size" in self.variables:
points.set_sizes(self._size_map(data["size"]))
if "style" in self.variables:
p = [self._style_map(val, "path") for val in data["style"]]
points.set_paths(p)
# Apply dependent default attributes
if "linewidth" not in kws:
sizes = points.get_sizes()
points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
self.add_legend_data(ax)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
@_deprecate_positional_args
def lineplot(
*,
x=None, y=None,
hue=None, size=None, style=None,
data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
units=None, estimator="mean", ci="deprecated", n_boot=1000, seed=None,
sort=True, err_style="band", err_kws=None,
legend="auto",
errorbar=("ci", 95),
ax=None, **kwargs
):
# Handle deprecation of ci parameter
errorbar = _deprecate_ci(errorbar, ci)
variables = _LinePlotter.get_semantics(locals())
p = _LinePlotter(
data=data, variables=variables,
estimator=estimator, ci=ci, n_boot=n_boot, seed=seed,
sort=sort, err_style=err_style, err_kws=err_kws, legend=legend,
errorbar=errorbar,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
if ax is None:
ax = plt.gca()
if style is None and not {"ls", "linestyle"} & set(kwargs): # XXX
kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
lineplot.__doc__ = """\
Draw a line plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
By default, the plot aggregates over multiple ``y`` values at each value of
``x`` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce lines with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : vector or key in ``data``
Grouping variable that will produce lines with different widths.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : vector or key in ``data``
Grouping variable that will produce lines with different dashes
and/or markers. Can have a numeric dtype but will always be treated
as categorical.
{params.core.data}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.dashes}
{params.rel.markers}
{params.rel.style_order}
{params.rel.units}
{params.rel.estimator}
{params.rel.ci}
{params.rel.n_boot}
{params.rel.seed}
sort : boolean
If True, the data will be sorted by the x and y variables, otherwise
lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars"
Whether to draw the confidence intervals with translucent error bands
or discrete error bars.
err_kws : dict of keyword arguments
Additional parameters to control the aesthetics of the error bars. The
kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
or :meth:`matplotlib.axes.Axes.errorbar`, depending on ``err_style``.
{params.rel.legend}
{params.stat.errorbar}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.scatterplot}
{seealso.pointplot}
Examples
--------
.. include:: ../docstrings/lineplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def scatterplot(
*,
x=None, y=None,
hue=None, style=None, size=None, data=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None,
x_bins=None, y_bins=None,
units=None, estimator=None, ci=95, n_boot=1000,
alpha=None, x_jitter=None, y_jitter=None,
legend="auto", ax=None,
**kwargs
):
variables = _ScatterPlotter.get_semantics(locals())
p = _ScatterPlotter(
data=data, variables=variables,
x_bins=x_bins, y_bins=y_bins,
estimator=estimator, ci=ci, n_boot=n_boot,
alpha=alpha, x_jitter=x_jitter, y_jitter=y_jitter, legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", None)
kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : vector or key in ``data``
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in the latter case.
style : vector or key in ``data``
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{params.core.data}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{{x,y}}_bins : lists or arrays or functions
*Currently non-functional.*
{params.rel.units}
*Currently non-functional.*
{params.rel.estimator}
*Currently non-functional.*
{params.rel.ci}
*Currently non-functional.*
{params.rel.n_boot}
*Currently non-functional.*
alpha : float
Proportional opacity of the points.
{{x,y}}_jitter : booleans or floats
*Currently non-functional.*
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def relplot(
*,
x=None, y=None,
hue=None, size=None, style=None, data=None,
row=None, col=None,
col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="auto", kind="scatter",
height=5, aspect=1, facet_kws=None,
units=None,
**kwargs
):
if kind == "scatter":
plotter = _ScatterPlotter
func = scatterplot
markers = True if markers is None else markers
elif kind == "line":
plotter = _LinePlotter
func = lineplot
dashes = True if dashes is None else dashes
else:
err = "Plot kind {} not recognized".format(kind)
raise ValueError(err)
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"relplot is a figure-level function and does not accept "
"the `ax` parameter. You may wish to try {}".format(kind + "plot")
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
# Use the full dataset to map the semantics
p = plotter(
data=data,
variables=plotter.get_semantics(locals()),
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
# Extract the semantic mappings
if "hue" in p.variables:
palette = p._hue_map.lookup_table
hue_order = p._hue_map.levels
hue_norm = p._hue_map.norm
else:
palette = hue_order = hue_norm = None
if "size" in p.variables:
sizes = p._size_map.lookup_table
size_order = p._size_map.levels
size_norm = p._size_map.norm
if "style" in p.variables:
style_order = p._style_map.levels
if markers:
markers = {k: p._style_map(k, "marker") for k in style_order}
else:
markers = None
if dashes:
dashes = {k: p._style_map(k, "dashes") for k in style_order}
else:
dashes = None
else:
markers = dashes = style_order = None
# Now extract the data that would be used to draw a single plot
variables = p.variables
plot_data = p.plot_data
plot_semantics = p.semantics
# Define the common plotting parameters
plot_kws = dict(
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
sizes=sizes, size_order=size_order, size_norm=size_norm,
markers=markers, dashes=dashes, style_order=style_order,
legend=False,
)
plot_kws.update(kwargs)
if kind == "scatter":
plot_kws.pop("dashes")
# Add the grid semantics onto the plotter
grid_semantics = "row", "col"
p.semantics = plot_semantics + grid_semantics
p.assign_variables(
data=data,
variables=dict(
x=x, y=y,
hue=hue, size=size, style=style, units=units,
row=row, col=col,
),
)
# Define the named variables for plotting on each facet
# Rename the variables with a leading underscore to avoid
# collisions with faceting variable names
plot_variables = {v: f"_{v}" for v in variables}
plot_kws.update(plot_variables)
# Pass the row/col variables to FacetGrid with their original
# names so that the axes titles render correctly
grid_kws = {v: p.variables.get(v, None) for v in grid_semantics}
# Rename the columns of the plot_data structure appropriately
new_cols = plot_variables.copy()
new_cols.update(grid_kws)
full_data = p.plot_data.rename(columns=new_cols)
# Set up the FacetGrid object
facet_kws = {} if facet_kws is None else facet_kws.copy()
g = FacetGrid(
data=full_data.dropna(axis=1, how="all"),
**grid_kws,
col_wrap=col_wrap, row_order=row_order, col_order=col_order,
height=height, aspect=aspect, dropna=False,
**facet_kws
)
# Draw the plot
g.map_dataframe(func, **plot_kws)
# Label the axes
g.set_axis_labels(
variables.get("x", None), variables.get("y", None)
)
# Show the legend
if legend:
# Replace the original plot data so the legend uses
# numeric data with the correct type
p.plot_data = plot_data
p.add_legend_data(g.axes.flat[0])
if p.legend_data:
g.add_legend(legend_data=p.legend_data,
label_order=p.legend_order,
title=p.legend_title,
adjust_subtitles=True)
# Rename the columns of the FacetGrid's `data` attribute
# to match the original column names
orig_cols = {
f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
}
g.data = g.data.rename(columns=orig_cols)
return g
relplot.__doc__ = """\
Figure-level interface for drawing relational plots onto a FacetGrid.
This function provides access to several different axes-level functions
that show the relationship between two variables with semantic mappings
of subsets. The ``kind`` parameter selects the underlying axes-level
function to use:
- :func:`scatterplot` (with ``kind="scatter"``; the default)
- :func:`lineplot` (with ``kind="line"``)
Extra keyword arguments are passed to the underlying function, so you
should refer to the documentation for each to see kind-specific options.
{narrative.main_api}
{narrative.relational_semantic}
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, and other parameters.
Parameters
----------
{params.core.xy}
hue : vector or key in ``data``
Grouping variable that will produce elements with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in the latter case.
size : vector or key in ``data``
Grouping variable that will produce elements with different sizes.
    Can be either categorical or numeric, although size mapping will
    behave differently in the latter case.
style : vector or key in ``data``
Grouping variable that will produce elements with different styles.
Can have a numeric dtype but will always be treated as categorical.
{params.core.data}
{params.facets.rowcol}
{params.facets.col_wrap}
row_order, col_order : lists of strings
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.style_order}
{params.rel.dashes}
{params.rel.markers}
{params.rel.legend}
kind : string
Kind of plot to draw, corresponding to a seaborn relational plot.
Options are {{``scatter`` and ``line``}}.
{params.facets.height}
{params.facets.aspect}
facet_kws : dict
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
{params.rel.units}
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
{returns.facetgrid}
Examples
--------
.. include:: ../docstrings/relplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
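# A minimal usage sketch (not part of the library source).  It assumes seaborn
# and matplotlib are installed and that the bundled "tips" example dataset can
# be loaded; the column names below are illustrative of that dataset.
if __name__ == "__main__":
    import seaborn as sns
    import matplotlib.pyplot as plt
    tips = sns.load_dataset("tips")
    # Facet scatter plots by "time"; map "smoker" to color.
    g = sns.relplot(
        data=tips, x="total_bill", y="tip",
        hue="smoker", col="time", kind="scatter",
    )
    g.set_axis_labels("Total bill", "Tip")
    plt.show()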
| bsd-3-clause |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/IPython/qt/console/rich_ipython_widget.py | 4 | 15234 | # Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from base64 import decodestring
import os
import re
from IPython.external.qt import QtCore, QtGui
from IPython.lib.latextools import latex_to_png
from IPython.utils.path import ensure_dir_exists
from IPython.utils.traitlets import Bool
from IPython.qt.svg import save_svg, svg_to_clipboard, svg_to_image
from .ipython_widget import IPythonWidget
class RichIPythonWidget(IPythonWidget):
""" An IPythonWidget that supports rich text, including lists, images, and
tables. Note that raw performance will be reduced compared to the plain
text version.
"""
# RichIPythonWidget protected class variables.
_payload_source_plot = 'IPython.kernel.zmq.pylab.backend_payload.add_plot_payload'
_jpg_supported = Bool(False)
# Used to determine whether a given html export attempt has already
# displayed a warning about being unable to convert a png to svg.
_svg_warning_displayed = False
#---------------------------------------------------------------------------
# 'object' interface
#---------------------------------------------------------------------------
def __init__(self, *args, **kw):
""" Create a RichIPythonWidget.
"""
kw['kind'] = 'rich'
super(RichIPythonWidget, self).__init__(*args, **kw)
# Configure the ConsoleWidget HTML exporter for our formats.
self._html_exporter.image_tag = self._get_image_tag
# Dictionary for resolving document resource names to SVG data.
self._name_to_svg_map = {}
        # Do we support JPG?
        # JPG support sometimes comes from a Qt plugin, so assume it is not
        # always available.
_supported_format = map(str, QtGui.QImageReader.supportedImageFormats())
self._jpg_supported = 'jpeg' in _supported_format
#---------------------------------------------------------------------------
    # 'ConsoleWidget' public interface overrides
#---------------------------------------------------------------------------
def export_html(self):
""" Shows a dialog to export HTML/XML in various formats.
Overridden in order to reset the _svg_warning_displayed flag prior
to the export running.
"""
self._svg_warning_displayed = False
super(RichIPythonWidget, self).export_html()
#---------------------------------------------------------------------------
# 'ConsoleWidget' protected interface
#---------------------------------------------------------------------------
def _context_menu_make(self, pos):
""" Reimplemented to return a custom context menu for images.
"""
format = self._control.cursorForPosition(pos).charFormat()
name = format.stringProperty(QtGui.QTextFormat.ImageName)
if name:
menu = QtGui.QMenu()
menu.addAction('Copy Image', lambda: self._copy_image(name))
menu.addAction('Save Image As...', lambda: self._save_image(name))
menu.addSeparator()
svg = self._name_to_svg_map.get(name, None)
if svg is not None:
menu.addSeparator()
menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
menu.addAction('Save SVG As...',
lambda: save_svg(svg, self._control))
else:
menu = super(RichIPythonWidget, self)._context_menu_make(pos)
return menu
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _pre_image_append(self, msg, prompt_number):
"""Append the Out[] prompt and make the output nicer
        Shared code for some of the following if statements.
"""
self._append_plain_text(self.output_sep, True)
self._append_html(self._make_out_prompt(prompt_number), True)
self._append_plain_text('\n', True)
def _handle_execute_result(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("execute_result: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
content = msg['content']
prompt_number = content.get('execution_count', 0)
data = content['data']
metadata = msg['content']['metadata']
if 'image/svg+xml' in data:
self._pre_image_append(msg, prompt_number)
self._append_svg(data['image/svg+xml'], True)
self._append_html(self.output_sep2, True)
elif 'image/png' in data:
self._pre_image_append(msg, prompt_number)
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
self._append_html(self.output_sep2, True)
elif 'image/jpeg' in data and self._jpg_supported:
self._pre_image_append(msg, prompt_number)
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
self._append_html(self.output_sep2, True)
elif 'text/latex' in data:
self._pre_image_append(msg, prompt_number)
self._append_latex(data['text/latex'], True)
self._append_html(self.output_sep2, True)
else:
# Default back to the plain text representation.
return super(RichIPythonWidget, self)._handle_execute_result(msg)
def _handle_display_data(self, msg):
"""Overridden to handle rich data types, like SVG."""
self.log.debug("display_data: %s", msg.get('content', ''))
if self.include_output(msg):
self.flush_clearoutput()
data = msg['content']['data']
metadata = msg['content']['metadata']
# Try to use the svg or html representations.
# FIXME: Is this the right ordering of things to try?
self.log.debug("display: %s", msg.get('content', ''))
if 'image/svg+xml' in data:
svg = data['image/svg+xml']
self._append_svg(svg, True)
elif 'image/png' in data:
# PNG data is base64 encoded as it passes over the network
# in a JSON structure so we decode it.
png = decodestring(data['image/png'].encode('ascii'))
self._append_png(png, True, metadata=metadata.get('image/png', None))
elif 'image/jpeg' in data and self._jpg_supported:
jpg = decodestring(data['image/jpeg'].encode('ascii'))
self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
elif 'text/latex' in data:
self._append_latex(data['text/latex'], True)
else:
# Default back to the plain text representation.
return super(RichIPythonWidget, self)._handle_display_data(msg)
#---------------------------------------------------------------------------
# 'RichIPythonWidget' protected interface
#---------------------------------------------------------------------------
def _append_latex(self, latex, before_prompt=False, metadata=None):
""" Append latex data to the widget."""
try:
png = latex_to_png(latex, wrap=False)
except Exception as e:
self.log.error("Failed to render latex: '%s'", latex, exc_info=True)
self._append_plain_text("Failed to render latex: %s" % e, before_prompt)
else:
self._append_png(png, before_prompt, metadata)
def _append_jpg(self, jpg, before_prompt=False, metadata=None):
""" Append raw JPG data to the widget."""
self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)
def _append_png(self, png, before_prompt=False, metadata=None):
""" Append raw PNG data to the widget.
"""
self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)
def _append_svg(self, svg, before_prompt=False):
""" Append raw SVG data to the widget.
"""
self._append_custom(self._insert_svg, svg, before_prompt)
def _add_image(self, image):
""" Adds the specified QImage to the document and returns a
QTextImageFormat that references it.
"""
document = self._control.document()
name = str(image.cacheKey())
document.addResource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name), image)
format = QtGui.QTextImageFormat()
format.setName(name)
return format
def _copy_image(self, name):
""" Copies the ImageResource with 'name' to the clipboard.
"""
image = self._get_image(name)
QtGui.QApplication.clipboard().setImage(image)
def _get_image(self, name):
""" Returns the QImage stored as the ImageResource with 'name'.
"""
document = self._control.document()
image = document.resource(QtGui.QTextDocument.ImageResource,
QtCore.QUrl(name))
return image
def _get_image_tag(self, match, path = None, format = "png"):
""" Return (X)HTML mark-up for the image-tag given by match.
Parameters
----------
match : re.SRE_Match
A match to an HTML image tag as exported by Qt, with
match.group("Name") containing the matched image ID.
path : string|None, optional [default None]
If not None, specifies a path to which supporting files may be
written (e.g., for linked images). If None, all images are to be
included inline.
format : "png"|"svg"|"jpg", optional [default "png"]
Format for returned or referenced images.
"""
if format in ("png","jpg"):
try:
image = self._get_image(match.group("name"))
except KeyError:
return "<b>Couldn't find image %s</b>" % match.group("name")
if path is not None:
ensure_dir_exists(path)
relpath = os.path.basename(path)
if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
"PNG"):
return '<img src="%s/qt_img%s.%s">' % (relpath,
match.group("name"),format)
else:
return "<b>Couldn't save image!</b>"
else:
ba = QtCore.QByteArray()
buffer_ = QtCore.QBuffer(ba)
buffer_.open(QtCore.QIODevice.WriteOnly)
image.save(buffer_, format.upper())
buffer_.close()
return '<img src="data:image/%s;base64,\n%s\n" />' % (
format,re.sub(r'(.{60})',r'\1\n',str(ba.toBase64())))
elif format == "svg":
try:
svg = str(self._name_to_svg_map[match.group("name")])
except KeyError:
if not self._svg_warning_displayed:
QtGui.QMessageBox.warning(self, 'Error converting PNG to SVG.',
'Cannot convert PNG images to SVG, export with PNG figures instead. '
'If you want to export matplotlib figures as SVG, add '
'to your ipython config:\n\n'
'\tc.InlineBackend.figure_format = \'svg\'\n\n'
'And regenerate the figures.',
QtGui.QMessageBox.Ok)
self._svg_warning_displayed = True
return ("<b>Cannot convert PNG images to SVG.</b> "
"You must export this session with PNG images. "
"If you want to export matplotlib figures as SVG, add to your config "
"<span>c.InlineBackend.figure_format = 'svg'</span> "
"and regenerate the figures.")
# Not currently checking path, because it's tricky to find a
# cross-browser way to embed external SVG images (e.g., via
# object or embed tags).
# Chop stand-alone header from matplotlib SVG
offset = svg.find("<svg")
assert(offset > -1)
return svg[offset:]
else:
return '<b>Unrecognized image format</b>'
def _insert_jpg(self, cursor, jpg, metadata=None):
""" Insert raw PNG data into the widget."""
self._insert_img(cursor, jpg, 'jpg', metadata=metadata)
def _insert_png(self, cursor, png, metadata=None):
""" Insert raw PNG data into the widget.
"""
self._insert_img(cursor, png, 'png', metadata=metadata)
def _insert_img(self, cursor, img, fmt, metadata=None):
""" insert a raw image, jpg or png """
if metadata:
width = metadata.get('width', None)
height = metadata.get('height', None)
else:
width = height = None
try:
image = QtGui.QImage()
image.loadFromData(img, fmt.upper())
if width and height:
image = image.scaled(width, height, transformMode=QtCore.Qt.SmoothTransformation)
elif width and not height:
image = image.scaledToWidth(width, transformMode=QtCore.Qt.SmoothTransformation)
elif height and not width:
image = image.scaledToHeight(height, transformMode=QtCore.Qt.SmoothTransformation)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
else:
format = self._add_image(image)
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _insert_svg(self, cursor, svg):
""" Insert raw SVG data into the widet.
"""
try:
image = svg_to_image(svg)
except ValueError:
self._insert_plain_text(cursor, 'Received invalid SVG data.')
else:
format = self._add_image(image)
self._name_to_svg_map[format.name()] = svg
cursor.insertBlock()
cursor.insertImage(format)
cursor.insertBlock()
def _save_image(self, name, format='PNG'):
""" Shows a save dialog for the ImageResource with 'name'.
"""
dialog = QtGui.QFileDialog(self._control, 'Save Image')
dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dialog.setDefaultSuffix(format.lower())
dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
if dialog.exec_():
filename = dialog.selectedFiles()[0]
image = self._get_image(name)
image.save(filename, format)
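# Hedged illustration (not part of the original widget source): the JPEG-support
# probe used in __init__ can be exercised standalone.  It assumes a Qt binding
# is importable through the same IPython.external.qt shim imported above.
if __name__ == '__main__':
    supported = [str(fmt) for fmt in QtGui.QImageReader.supportedImageFormats()]
    print('jpeg supported:', 'jpeg' in supported)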
| mit |
asimshankar/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans.py | 13 | 20274 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (
abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_features_if_necessary(features, feature_columns):
"""Helper function to convert the input points into a usable format.
Args:
features: The input features.
    feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column instances
that can be passed to `tf.feature_column.input_layer`. If this is None,
all features will be used.
Returns:
If `features` is a dict of `k` features (optionally filtered by
`feature_columns`), each of which is a vector of `n` scalars, the return
value is a Tensor of shape `(n, k)` representing `n` input points, where the
items in the `k` dimension are sorted lexicographically by `features` key.
If `features` is not a dict, it is returned unmodified.
"""
if not isinstance(features, dict):
return features
if feature_columns:
return fc.input_layer(features, feature_columns)
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
return array_ops.concat([features[k] for k in keys], axis=1)
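# Illustration (not part of the library): with feature_columns=None and a dict
# such as {"b": <Tensor of shape (n, 1)>, "a": <Tensor of shape (n, 1)>}, the
# keys are sorted to ("a", "b") and the tensors are concatenated along axis 1,
# yielding a single (n, 2) Tensor of input points for the clustering ops.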
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
self._feature_columns = feature_columns
def model_fn(self, features, mode, config):
"""Model function for the estimator.
Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See `tf.estimator.Estimator`.
mode: See `tf.estimator.Estimator`.
config: See `tf.estimator.Estimator`.
Returns:
A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_features_if_necessary(features, self._feature_columns)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
    # losses: Similar to model_predictions but provides the distance from each
    # input point to its assigned cluster center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
export_outputs = {
KMeansClustering.ALL_DISTANCES:
export_output.PredictOutput(all_distances[0]),
KMeansClustering.CLUSTER_INDEX:
export_output.PredictOutput(model_predictions[0]),
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(model_predictions[0])
}
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks,
export_outputs=export_outputs)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
Example:
```
import numpy as np
import tensorflow as tf
num_points = 100
dimensions = 2
points = np.random.uniform(0, 1000, [num_points, dimensions])
def input_fn():
return tf.train.limit_epochs(
tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
num_clusters = 5
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=num_clusters, use_mini_batch=False)
# train
num_iterations = 10
previous_centers = None
for _ in xrange(num_iterations):
kmeans.train(input_fn)
cluster_centers = kmeans.cluster_centers()
if previous_centers is not None:
print 'delta:', cluster_centers - previous_centers
previous_centers = cluster_centers
print 'score:', kmeans.score(input_fn)
print 'cluster centers:', cluster_centers
# map the input points to their clusters
cluster_indices = list(kmeans.predict_cluster_index(input_fn))
for i, point in enumerate(points):
cluster_index = cluster_indices[i]
center = cluster_centers[cluster_index]
print 'point:', point, 'is in cluster', cluster_index, 'centered at', center
```
The `SavedModel` saved by the `export_savedmodel` method does not include the
cluster centers. However, the cluster centers may be retrieved by the
latest checkpoint saved during training. Specifically,
```
kmeans.cluster_centers()
```
is equivalent to
```
tf.train.load_variable(
kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
```
"""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
# Variable name used by cluster_centers().
CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None,
feature_columns=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
However, there is no guarantee by this implementation that each input
is seen exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not
behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following:
* a tensor or numpy array with the initial cluster centers.
* a callable `f(inputs, k)` that selects and returns up to `k` centers
from an input batch. `f` is free to return any number of centers
from `0` to `k`. It will be invoked on successive input batches
as necessary until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the
entire batch is chosen to be initial cluster centers and the
remaining centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less
than `num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
* `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: Euclidean distance
between vectors `u` and `v` is defined as \\(||u - v||_2\\)
which is the square root of the sum of the absolute squares of
the elements' difference.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as \\(1 - (u . v) / (||u||_2 ||v||_2)\\).
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See `tf.estimator.Estimator`.
      feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to `tf.feature_column.input_layer`. If this
is None, all features will be used.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(
num_clusters, initial_clusters, distance_metric, random_seed,
use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns).model_fn,
model_dir=model_dir,
config=config)
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
    Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
    this function returns the squared Euclidean distance while the
    corresponding sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
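# A short usage sketch (not part of the library).  It mirrors the class
# docstring example but enables the mini-batch variant; the data, cluster count
# and iteration count are illustrative values only.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf
  points = np.random.uniform(0, 1000, [100, 2]).astype(np.float32)
  def _input_fn():
    return tf.train.limit_epochs(
        tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
  kmeans = KMeansClustering(num_clusters=5, use_mini_batch=True,
                            mini_batch_steps_per_iteration=2)
  for _ in range(10):
    kmeans.train(_input_fn)
  print('score:', kmeans.score(_input_fn))
  print('cluster centers:', kmeans.cluster_centers())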
| apache-2.0 |
jjx02230808/project0223 | examples/gaussian_process/plot_gpr_co2.py | 131 | 5705 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214::
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that we have a locally very close to periodic seasonal
component. The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure shows also that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_compute_rt_decoder.py | 3 | 3932 | """
=======================
Decoding real-time data
=======================
Supervised machine learning applied to MEG data in sensor space.
Here the classifier is updated every 5 trials and the decoding
accuracy is plotted
"""
# Authors: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import MockRtClient, RtEpochs
from mne.datasets import sample
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
tr_percent = 60 # Training percentage
min_trials = 10 # minimum trials after which decoding should start
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks, decim=1,
reject=dict(grad=4000e-13, eog=150e-6), baseline=None,
isi_max=4.)
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=90, buffer_size=1000)
# Decoding in sensor space using a linear SVM
n_times = len(rt_epochs.times)
from sklearn import preprocessing # noqa
from sklearn.svm import SVC # noqa
from sklearn.pipeline import Pipeline # noqa
from sklearn.model_selection import cross_val_score, ShuffleSplit # noqa
from mne.decoding import Vectorizer, FilterEstimator # noqa
scores_x, scores, std_scores = [], [], []
# don't highpass filter because it's epoched data and the signal length
# is small
filt = FilterEstimator(rt_epochs.info, None, 40, fir_design='firwin')
scaler = preprocessing.StandardScaler()
vectorizer = Vectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
data_picks = mne.pick_types(rt_epochs.info, meg='grad', eeg=False, eog=True,
stim=False, exclude=raw.info['bads'])
ax = plt.subplot(111)
ax.set_xlabel('Trials')
ax.set_ylabel('Classification score (% correct)')
ax.set_title('Real-time decoding')
ax.set_xlim([min_trials, 50])
ax.set_ylim([30, 105])
plt.axhline(50, color='k', linestyle='--', label="Chance level")
plt.show(block=False)
for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ev_num + 1))
if ev_num == 0:
X = ev.data[None, data_picks, :]
y = int(ev.comment) # the comment attribute contains the event_id
else:
X = np.concatenate((X, ev.data[None, data_picks, :]), axis=0)
y = np.append(y, int(ev.comment))
if ev_num >= min_trials:
cv = ShuffleSplit(5, test_size=0.2, random_state=42)
scores_t = cross_val_score(concat_classifier, X, y, cv=cv,
n_jobs=1) * 100
std_scores.append(scores_t.std())
scores.append(scores_t.mean())
scores_x.append(ev_num)
# Plot accuracy
plt.plot(scores_x[-2:], scores[-2:], '-x', color='b',
label="Classif. score")
ax.plot(scores_x[-1], scores[-1])
hyp_limits = (np.asarray(scores) - np.asarray(std_scores),
np.asarray(scores) + np.asarray(std_scores))
fill = plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1],
color='b', alpha=0.5)
plt.pause(0.01)
plt.draw()
ax.collections.remove(fill) # Remove old fill area
plt.fill_between(scores_x, hyp_limits[0], y2=hyp_limits[1], color='b',
alpha=0.5)
plt.draw() # Final figure
| bsd-3-clause |
WZQ1397/automatic-repo | project/DataAnalysis/pandas/pandaCsvOpsDemo.py | 1 | 4906 | # -*- coding: utf-8 -*-
# Author: Zach.Wang
# @Time : 2020-02-28 13:47
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
'''
Reading and writing data in text format:
read_csv
read_table
'''
data = DataFrame(np.arange(16).reshape(4, 4), index=list('abcd'), columns=['hhb', 'zjx', 'hcy', 'zjy'])
print(data)
data.to_csv('data/ex1.csv')
rdata = pd.read_csv('data/ex1.csv')
print(rdata)
# Unnamed: 0 hhb zjx hcy zjy
# 0 a 0 1 2 3
# 1 b 4 5 6 7
# 2 c 8 9 10 11
# 3 d 12 13 14 15
rdata = pd.read_table('data/ex1.csv', sep=',')
print(rdata)
# Unnamed: 0 hhb zjx hcy zjy
# 0 a 0 1 2 3
# 1 b 4 5 6 7
# 2 c 8 9 10 11
# 3 d 12 13 14 15
'''
配置默认名字
'''
# data=DataFrame(np.random.randn(3,4))
# print(data)
# # 0 1 2 3
# # 0 1.095597 0.454671 0.503149 -0.337012
# # 1 -0.688659 -1.455076 0.826556 0.823949
# # 2 1.122201 0.303618 -0.399119 0.979075
#
# data.to_csv('data/ex2.csv')
rdata = pd.read_csv('data/ex2.csv', header=None)
print(rdata)
# 0 1 2 3
# 0 -0.310421 -0.323209 0.996199 0.927549
# 1 -0.076534 -0.160730 -1.780651 -1.069414
# 2 -0.703372 -1.265776 -0.117108 -0.164619
# Assign column names
rdata = pd.read_csv('data/ex2.csv', names=['a', 'b', 'c', 'd'])
print(rdata)
# a b c d
# 0 -0.310421 -0.323209 0.996199 aaa
# 1 -0.076534 -0.160730 -1.780651 bbb
# 2 -0.703372 -1.265776 -0.117108 ccc
# Make the last column the row index
names = ['a', 'b', 'c', '行索引']
rdata = pd.read_csv('data/ex2.csv', names=names, index_col='行索引')
print(rdata)
# a b c
# 行索引
# aaa -0.310421 -0.323209 0.996199
# bbb -0.076534 -0.160730 -1.780651
# ccc -0.703372 -1.265776 -0.117108
'''
Programmatic parsing (hierarchical row index)
'''
# data=DataFrame(np.arange(16).reshape(8,2),index=[['one','one','one','one','two','two','two','one'],['a','b','c','d','a','b','c','d']],columns=['key1','key2'])
#
# data.to_csv('data/ex3.csv')
# value1,value2,key1,key2
# one,a,0,1
# one,b,2,3
# one,c,4,5
# one,d,6,7
# two,a,8,9
# two,b,10,11
# two,c,12,13
# one,d,14,15
rdata = pd.read_csv('data/ex3.csv', index_col=['value1', 'value2'])
print(rdata)
# key1 key2
# value1 value2
# one a 0 1
# b 2 3
# c 4 5
# d 6 7
# two a 8 9
# b 10 11
# c 12 13
# one d 14 15
# Selective reading (skipping rows)
rdata = pd.read_csv('data/ex4.csv', skiprows=[0, 2])
print(rdata)
# value1 value2 key1 key2
# 0 one a 0 1
# 1 one b 2 3
# 2 one c 4 5
# 3 one d 6 7
# 4 two a 8 9
# 5 two b 10 11
# 6 two c 12 13
# 7 one d 14 15
data = pd.read_csv('data/ex5.csv')
print(data)
# value1 value2 key1 key2
# 0 one NaN 0 1
# 1 one b 2 3
# 2 one c 4 Na
# Specify values to treat as NaN
sentinels = {'key2': [3, 'Na'], 'value2': 'b'}
data = pd.read_csv('data/ex5.csv', na_values=sentinels)
print(data)
# value1 value2 key1 key2
# 0 one NaN 0 1.0
# 1 one NaN 2 NaN
# 2 one c 4 NaN
'''
Reading a file in chunks
'''
data = pd.read_csv('data/ex4.csv', nrows=2, skiprows=[0, 2])
print(data)
# value1 value2 key1 key2
# 0 one a 0 1
# 1 one b 2 3
# Split the data into chunks
chunker = pd.read_csv('data/ex3.csv', chunksize=2)
print(chunker) # <pandas.io.parsers.TextFileReader object at 0x000000000B1B9F60>
tot = Series([])
for piece in chunker:
tot = tot.add(piece['value1'].value_counts(), fill_value=0)
print(tot)
# one 5.0
# two 3.0
# dtype: float64
print(tot[0])
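# A small additional sketch (assumes data/ex3.csv, created above, still exists):
# TextFileReader can also pull an explicit number of rows on demand.
chunker2 = pd.read_csv('data/ex3.csv', chunksize=2)
print(chunker2.get_chunk(2))  # first two rows as a DataFrame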
'''
Writing text output with a custom delimiter
'''
data = pd.read_csv('data/ex3.csv')
print(data)
import sys
data.to_csv('data/ex6.csv', sep='|')
# |value1|value2|key1|key2
# 0|one|a|0|1
# 1|one|b|2|3
# 2|one|c|4|5
# 3|one|d|6|7
# 4|two|a|8|9
# 5|two|b|10|11
# 6|two|c|12|13
# 7|one|d|14|15
data = pd.read_csv('data/ex5.csv')
data.to_csv('data/ex7.csv', na_rep='null')  # write missing values as 'null'
# ,value1,value2,key1,key2
# 0,one,null,0,1
# 1,one,b,2,3
# 2,one,c,4,Na
# Disable writing the index and header
data.to_csv('data/ex8.csv', index=False, header=False)
# one,,0,1
# one,b,2,3
# one,c,4,Na
# Specify the column order
data.to_csv('data/ex9.csv', columns=['key1', 'key2', 'value1', 'value2'])
# ,key1,key2,value1,value2
# 0,0,1,one,
# 1,2,3,one,b
# 2,4,Na,one,c
'''
json
'''
import json
obj = '{"name":"hb","data":[1,2,3,4,5],"dict":[{"name":"aa"},{"name":"bb"}],"Na":1}'
# result=json.dumps(obj)
# print(result)
data = json.loads(obj)
print(data)
dict = DataFrame(data['dict'], columns=['name'])
print(dict)
# name
# 0 aa
# 1 bb | lgpl-3.0 |
florentchandelier/zipline | tests/test_security_list.py | 5 | 12556 | from datetime import timedelta
import pandas as pd
from testfixtures import TempDirectory
from nose_parameterized import parameterized
from zipline.algorithm import TradingAlgorithm
from zipline.errors import TradingControlViolation
from zipline.testing import (
add_security_data,
create_data_portal,
security_list_copy,
tmp_trading_env,
tmp_dir,
)
from zipline.testing.fixtures import (
WithLogger,
WithTradingEnvironment,
ZiplineTestCase,
)
from zipline.utils import factory
from zipline.utils.security_list import (
SecurityListSet,
load_from_directory,
)
LEVERAGED_ETFS = load_from_directory('leveraged_etf_list')
class RestrictedAlgoWithCheck(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_asset_restrictions(self.rl.restrict_leveraged_etfs)
self.order_count = 0
self.sid = self.symbol(symbol)
def handle_data(self, data):
if not self.order_count:
if self.sid not in \
self.rl.leveraged_etf_list.\
current_securities(self.get_datetime()):
self.order(self.sid, 100)
self.order_count += 1
class RestrictedAlgoWithoutCheck(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_asset_restrictions(self.rl.restrict_leveraged_etfs)
self.order_count = 0
self.sid = self.symbol(symbol)
def handle_data(self, data):
self.order(self.sid, 100)
self.order_count += 1
class RestrictedAlgoWithoutCheckSetDoNotOrderList(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_do_not_order_list(self.rl.leveraged_etf_list)
self.order_count = 0
self.sid = self.symbol(symbol)
def handle_data(self, data):
self.order(self.sid, 100)
self.order_count += 1
class IterateRLAlgo(TradingAlgorithm):
def initialize(self, symbol):
self.rl = SecurityListSet(self.get_datetime, self.asset_finder)
self.set_asset_restrictions(self.rl.restrict_leveraged_etfs)
self.order_count = 0
self.sid = self.symbol(symbol)
self.found = False
def handle_data(self, data):
for stock in self.rl.leveraged_etf_list.\
current_securities(self.get_datetime()):
if stock == self.sid:
self.found = True
class SecurityListTestCase(WithLogger,
WithTradingEnvironment,
ZiplineTestCase):
@classmethod
def init_class_fixtures(cls):
super(SecurityListTestCase, cls).init_class_fixtures()
# this is ugly, but we need to create two different
# TradingEnvironment/DataPortal pairs
cls.start = pd.Timestamp(list(LEVERAGED_ETFS.keys())[0])
end = pd.Timestamp('2015-02-17', tz='utc')
cls.extra_knowledge_date = pd.Timestamp('2015-01-27', tz='utc')
cls.trading_day_before_first_kd = pd.Timestamp('2015-01-23', tz='utc')
symbols = ['AAPL', 'GOOG', 'BZQ', 'URTY', 'JFT']
cls.env = cls.enter_class_context(tmp_trading_env(
equities=pd.DataFrame.from_records([{
'start_date': cls.start,
'end_date': end,
'symbol': symbol,
'exchange': "TEST",
} for symbol in symbols]),
load=cls.make_load_function(),
))
cls.sim_params = factory.create_simulation_parameters(
start=cls.start,
num_days=4,
trading_calendar=cls.trading_calendar
)
cls.sim_params2 = sp2 = factory.create_simulation_parameters(
start=cls.trading_day_before_first_kd, num_days=4
)
cls.env2 = cls.enter_class_context(tmp_trading_env(
equities=pd.DataFrame.from_records([{
'start_date': sp2.start_session,
'end_date': sp2.end_session,
'symbol': symbol,
'exchange': "TEST",
} for symbol in symbols]),
load=cls.make_load_function(),
))
cls.tempdir = cls.enter_class_context(tmp_dir())
cls.tempdir2 = cls.enter_class_context(tmp_dir())
cls.data_portal = create_data_portal(
asset_finder=cls.env.asset_finder,
tempdir=cls.tempdir,
sim_params=cls.sim_params,
sids=range(0, 5),
trading_calendar=cls.trading_calendar,
)
cls.data_portal2 = create_data_portal(
asset_finder=cls.env2.asset_finder,
tempdir=cls.tempdir2,
sim_params=cls.sim_params2,
sids=range(0, 5),
trading_calendar=cls.trading_calendar,
)
def test_iterate_over_restricted_list(self):
algo = IterateRLAlgo(symbol='BZQ', sim_params=self.sim_params,
env=self.env)
algo.run(self.data_portal)
self.assertTrue(algo.found)
def test_security_list(self):
# set the knowledge date to the first day of the
# leveraged etf knowledge date.
def get_datetime():
return self.start
rl = SecurityListSet(get_datetime, self.env.asset_finder)
# assert that a sample from the leveraged list are in restricted
should_exist = [
asset.sid for asset in
[self.env.asset_finder.lookup_symbol(
symbol,
as_of_date=self.extra_knowledge_date)
for symbol in ["BZQ", "URTY", "JFT"]]
]
for sid in should_exist:
self.assertIn(
sid, rl.leveraged_etf_list.current_securities(get_datetime()))
# assert that a sample of allowed stocks are not in restricted
shouldnt_exist = [
asset.sid for asset in
[self.env.asset_finder.lookup_symbol(
symbol,
as_of_date=self.extra_knowledge_date)
for symbol in ["AAPL", "GOOG"]]
]
for sid in shouldnt_exist:
self.assertNotIn(
sid, rl.leveraged_etf_list.current_securities(get_datetime()))
def test_security_add(self):
def get_datetime():
return pd.Timestamp("2015-01-27", tz='UTC')
with security_list_copy():
add_security_data(['AAPL', 'GOOG'], [])
rl = SecurityListSet(get_datetime, self.env.asset_finder)
should_exist = [
asset.sid for asset in
[self.env.asset_finder.lookup_symbol(
symbol,
as_of_date=self.extra_knowledge_date
) for symbol in ["AAPL", "GOOG", "BZQ", "URTY"]]
]
for sid in should_exist:
self.assertIn(
sid,
rl.leveraged_etf_list.current_securities(get_datetime())
)
def test_security_add_delete(self):
with security_list_copy():
def get_datetime():
return pd.Timestamp("2015-01-27", tz='UTC')
rl = SecurityListSet(get_datetime, self.env.asset_finder)
self.assertNotIn(
"BZQ",
rl.leveraged_etf_list.current_securities(get_datetime())
)
self.assertNotIn(
"URTY",
rl.leveraged_etf_list.current_securities(get_datetime())
)
def test_algo_without_rl_violation_via_check(self):
algo = RestrictedAlgoWithCheck(symbol='BZQ',
sim_params=self.sim_params,
env=self.env)
algo.run(self.data_portal)
def test_algo_without_rl_violation(self):
algo = RestrictedAlgoWithoutCheck(symbol='AAPL',
sim_params=self.sim_params,
env=self.env)
algo.run(self.data_portal)
@parameterized.expand([
('using_set_do_not_order_list',
RestrictedAlgoWithoutCheckSetDoNotOrderList),
('using_set_restrictions', RestrictedAlgoWithoutCheck),
])
def test_algo_with_rl_violation(self, name, algo_class):
algo = algo_class(symbol='BZQ',
sim_params=self.sim_params,
env=self.env)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run(self.data_portal)
self.check_algo_exception(algo, ctx, 0)
# repeat with a symbol from a different lookup date
algo = RestrictedAlgoWithoutCheck(symbol='JFT',
sim_params=self.sim_params,
env=self.env)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run(self.data_portal)
self.check_algo_exception(algo, ctx, 0)
def test_algo_with_rl_violation_after_knowledge_date(self):
sim_params = factory.create_simulation_parameters(
start=self.start + timedelta(days=7),
num_days=5
)
data_portal = create_data_portal(
self.env.asset_finder,
self.tempdir,
sim_params=sim_params,
sids=range(0, 5),
trading_calendar=self.trading_calendar,
)
algo = RestrictedAlgoWithoutCheck(symbol='BZQ',
sim_params=sim_params,
env=self.env)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run(data_portal)
self.check_algo_exception(algo, ctx, 0)
def test_algo_with_rl_violation_cumulative(self):
"""
Add a new restriction, run a test long after both
knowledge dates, make sure stock from original restriction
set is still disallowed.
"""
sim_params = factory.create_simulation_parameters(
start=self.start + timedelta(days=7),
num_days=4
)
with security_list_copy():
add_security_data(['AAPL'], [])
algo = RestrictedAlgoWithoutCheck(
symbol='BZQ', sim_params=sim_params, env=self.env)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run(self.data_portal)
self.check_algo_exception(algo, ctx, 0)
def test_algo_without_rl_violation_after_delete(self):
sim_params = factory.create_simulation_parameters(
start=self.extra_knowledge_date,
num_days=4,
)
equities = pd.DataFrame.from_records([{
'symbol': 'BZQ',
'start_date': sim_params.start_session,
'end_date': sim_params.end_session,
'exchange': "TEST",
}])
with TempDirectory() as new_tempdir, \
security_list_copy(), \
tmp_trading_env(equities=equities,
load=self.make_load_function()) as env:
# add a delete statement removing bzq
# write a new delete statement file to disk
add_security_data([], ['BZQ'])
data_portal = create_data_portal(
env.asset_finder,
new_tempdir,
sim_params,
range(0, 5),
trading_calendar=self.trading_calendar,
)
algo = RestrictedAlgoWithoutCheck(
symbol='BZQ', sim_params=sim_params, env=env
)
algo.run(data_portal)
def test_algo_with_rl_violation_after_add(self):
with security_list_copy():
add_security_data(['AAPL'], [])
algo = RestrictedAlgoWithoutCheck(symbol='AAPL',
sim_params=self.sim_params2,
env=self.env2)
with self.assertRaises(TradingControlViolation) as ctx:
algo.run(self.data_portal2)
self.check_algo_exception(algo, ctx, 2)
def check_algo_exception(self, algo, ctx, expected_order_count):
self.assertEqual(algo.order_count, expected_order_count)
exc = ctx.exception
self.assertEqual(TradingControlViolation, type(exc))
exc_msg = str(ctx.exception)
self.assertTrue("RestrictedListOrder" in exc_msg)
| apache-2.0 |
murali-munna/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
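# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module). It
# assumes MLCOMP_DATASETS_HOME points at a folder containing the unzipped
# "20news-18828" MLComp dataset; any other dataset name works the same way.
if __name__ == "__main__":
    news_train = load_mlcomp("20news-18828", set_="train")
    print(news_train.DESCR)
    print("%d documents" % len(news_train.filenames))
    print("%d categories" % len(news_train.target_names))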
| bsd-3-clause |
jmschrei/PyPore | setup.py | 1 | 1410 | from distutils.core import setup
from distutils.extension import Extension
import numpy as np
try:
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = { }
if use_cython:
ext_modules = [
Extension("PyPore.cparsers", [ "PyPore/cparsers.pyx" ], include_dirs=[np.get_include()] ),
Extension("PyPore.calignment", [ "PyPore/calignment.pyx" ], include_dirs=[np.get_include()] )
]
cmdclass.update({ 'build_ext': build_ext })
else:
ext_modules = [
Extension("PyPore.cparsers", [ "PyPore/cparsers.c" ], include_dirs=[np.get_include()] ),
Extension("PyPore.calignment", [ "PyPore/calignment.c" ], include_dirs=[np.get_include()] )
]
setup(
name='pythonic-porin',
version='0.2.0',
author='Jacob Schreiber',
author_email='[email protected]',
packages=['PyPore'],
url='http://pypi.python.org/pypi/pythonic-porin/',
license='LICENSE.txt',
description='Nanopore Data Analysis package. Provides tools for reading data,\
performing event detection, segmentation, visualization, and analysis using\
hidden Markov models, and other tools. Designed for the UCSC Nanopore Group.',
cmdclass=cmdclass,
ext_modules=ext_modules,
install_requires=[
"cython >= 0.20.1",
"numpy >= 1.8.0",
"matplotlib >= 1.3.1"
],
)
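# Added note (not part of the original setup script): for local development
# the extensions are typically compiled in place with
#
#     python setup.py build_ext --inplace
#
# which uses the .pyx sources when Cython is available and falls back to the
# pre-generated .c files otherwise, mirroring the try/except fallback above.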
| mit |
mlperf/inference_results_v0.7 | closed/Cisco/code/dlrm-99/tensorrt/accuracy-dlrm.py | 18 | 3009 | #! /usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
sys.path.insert(0, os.getcwd())
import argparse
import json
import numpy as np
from sklearn.metrics import roc_auc_score
import datetime
def evaluate(log_path, ground_truth_file, sample_partition_file):
print("Loading ground truths...")
ground_truths = np.load(ground_truth_file)
print("Done loading ground truths.")
print("Loading sample partition...")
sample_partition = np.load(sample_partition_file)
print("Parsing LoadGen accuracy log...")
expected = []
predicted = []
with open(log_path) as f:
predictions = json.load(f)
for counter, prediction in enumerate(predictions):
if counter % 1000 == 0:
print("[{:}] {:} / {:}".format(datetime.datetime.now(), counter, len(predictions)))
qsl_idx = prediction["qsl_idx"]
assert qsl_idx < len(sample_partition), "qsl_idx exceeds total number of samples in validation dataset"
data = np.frombuffer(bytes.fromhex(prediction["data"]), np.float32)
start_idx = sample_partition[qsl_idx]
end_idx = sample_partition[qsl_idx+1]
assert len(data) == end_idx - start_idx, "Length of predictions does not match number of pairs in sample"
for i in data:
predicted.append(np.nan_to_num(i))
for i in range(start_idx, end_idx):
expected.append(ground_truths[i])
print("Done parsing LoadGen accuracy log.")
print("Evaluating results...")
score = roc_auc_score(expected, predicted)
print("Done evaluating results.")
print("auc={:.3f}%".format(score * 100))
def main():
parser = argparse.ArgumentParser("Accuracy checker for DLRM benchmark from LoadGen logs")
parser.add_argument("--mlperf-accuracy-file", help="Path to LoadGen log produced in AccuracyOnly mode")
parser.add_argument("--ground-truth-file", help="Path to ground_truth.npy file",
default="build/preprocessed_data/criteo/full_recalib/ground_truth.npy")
parser.add_argument("--sample-partition-file", help="Path to sample partition file",
default=os.path.join(os.environ.get("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "criteo", "full_recalib", "sample_partition.npy"))
args = parser.parse_args()
evaluate(args.mlperf_accuracy_file, args.ground_truth_file, args.sample_partition_file)
if __name__ == "__main__":
main()
| apache-2.0 |
CDE-UNIBE/lokp | lokp/protocols/shp_protocol.py | 1 | 6801 | import glob
import os
import shutil
import uuid
import zipfile
from cgi import FieldStorage
import geojson
import geopandas
from fiona.errors import FionaValueError
from pyramid.view import view_config
from lokp.config.files import upload_directory_path
class ShapefileProtocol:
valid_file_types = [
'application/x-dbf',
'application/octet-stream', # Not ideal ...
'application/x-esri-shape',
'application/zip',
]
valid_shapes = ['Polygon']
shapefile_required_files = ['.shp', '.dbf', '.shx']
default_crs = 'epsg:4326'
def __init__(self, request):
self.request = request
self.file_fields = []
self.is_zip_file = False
self.error = None
self.geojson = {}
self.temp_name = str(uuid.uuid4())
upload_path = upload_directory_path(self.request)
self.temp_folder = os.path.join(upload_path, f'temp_{self.temp_name}')
@view_config(route_name='shp_upload', renderer='json')
def process_uploaded_shapefile(self):
"""
This function is called as a POST request containing a shapefile
(actually consisting of several files) or a zip file. The shapefile is
parsed and if it contains valid geometries, these are returned in
GeoJSON format.
:return: dict.
"""
self.validate_file_types()
if self.error:
return self.return_error()
self.save_files()
# If uploaded file is a zip file, extract it.
if len(self.file_fields) == 1 and \
self.get_file_ending(self.file_fields[0].filename) == '.zip':
self.extract_zip()
self.validate_shp_parts()
if self.error:
return self.return_error()
self.read_shapefile()
if self.error:
return self.return_error()
self.remove_temp_dir()
return self.geojson
def return_error(self):
"""
An error occurred. Return with the appropriate status code and error message.
"""
self.remove_temp_dir()
self.request.response.status = 400
return {'error': self.error}
def read_shapefile(self):
"""
Read the shapefile and extract its geometries.
"""
# When extracting zip files, we need to find out the name of the
# shapefile
shapefile_path = self.find_shp_in_directory(self.temp_folder)
try:
geom_data_frame = geopandas.read_file(shapefile_path)
except FionaValueError:
self.error = 'Invalid file.'
return
# If the data is not in the default CRS, reproject it.
if geom_data_frame.crs.get('init') != self.default_crs:
geom_data_frame = geom_data_frame.to_crs({'init': self.default_crs})
# Check geometry types.
for index, row in geom_data_frame.iterrows():
if row.geometry.geom_type not in self.valid_shapes:
self.error = f'Invalid geometry. Supported geometry types: ' \
f'{", ".join(self.valid_shapes)}'
return
# The server must return only the geometry part(s) of the features, not
# the entire geojson.
geom_json = geojson.loads(geom_data_frame.to_json())
geometries = [f['geometry'] for f in geom_json['features']]
if len(geometries) != 1:
# If there are multiple features, create a single MultiPolygon out
# of them.
coordinates = [geom['coordinates'] for geom in geometries]
geometries = [geojson.MultiPolygon(tuple(coordinates))]
self.geojson = geojson.dumps(geometries[0])
def validate_shp_parts(self):
"""
Check that all files of a valid shapefile are available.
"""
file_endings = [
self.get_file_ending(filename) for filename in glob.glob(
os.path.join(self.temp_folder, '*'))]
missing_parts = set(self.shapefile_required_files) - set(file_endings)
if missing_parts:
self.error = 'Missing required parts of shapefile: %s' % ', '.join(
missing_parts)
return
def extract_zip(self):
"""
Extract an uploaded zipfile to the temporary location.
"""
file_ending = self.get_file_ending(self.file_fields[0].filename)
file_path = os.path.join(
self.temp_folder, f'{self.temp_name}{file_ending}')
with zipfile.ZipFile(file_path, 'r') as zip_ref:
zip_ref.extractall(self.temp_folder)
def save_files(self):
"""
Save uploaded files to a temporary location.
"""
# Create temporary upload directory
if not os.path.exists(self.temp_folder):
os.makedirs(self.temp_folder)
# Save all files
for field in self.file_fields:
input_file = field.file
# Prepare file name
file_ending = self.get_file_ending(field.filename)
file_path = os.path.join(
self.temp_folder, f'{self.temp_name}{file_ending}')
# Create a temporary file to prevent incomplete files from being
# used.
temp_file_path = f'{file_path}~'
input_file.seek(0)
with open(temp_file_path, 'wb') as output_file:
shutil.copyfileobj(input_file, output_file)
# Rename the temporary file
os.rename(temp_file_path, file_path)
def validate_file_types(self):
"""
Check that only valid file types are being sent.
"""
file_fields = []
for data in self.request.POST.items():
field = data[1]
if not isinstance(field, FieldStorage):
continue
file_fields.append(field)
invalid_file_types = set(
[f.type for f in file_fields]) - set(self.valid_file_types)
if invalid_file_types:
self.error = 'Invalid file types: %s' % ', '.join(
invalid_file_types)
return
self.file_fields = file_fields
def remove_temp_dir(self):
if os.path.exists(self.temp_folder):
shutil.rmtree(self.temp_folder)
@staticmethod
def get_file_ending(filename) -> str:
"""Return the ending (e.g. ".shp") of a filename."""
return os.path.splitext(filename)[1]
@staticmethod
def find_shp_in_directory(dir_path: str) -> str:
"""
Return the path of the shapefile (*.shp) in a directory.
ATTENTION: This returns only the first occurrence found!
"""
shapefiles = glob.glob(os.path.join(dir_path, '*.shp'))
if len(shapefiles) > 0:
return shapefiles[0]
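# ---------------------------------------------------------------------------
# Standalone sketch (added; not part of the original module): how several
# Polygon geometries are merged into a single MultiPolygon, mirroring the
# branch in read_shapefile() above. The coordinates are invented for the
# example.
if __name__ == '__main__':
    _polygons = [
        geojson.Polygon([[(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (0.0, 0.0)]]),
        geojson.Polygon([[(2.0, 2.0), (2.0, 3.0), (3.0, 3.0), (2.0, 2.0)]]),
    ]
    _merged = geojson.MultiPolygon(tuple(p['coordinates'] for p in _polygons))
    print(geojson.dumps(_merged))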
| gpl-3.0 |
oew1v07/scikit-image | doc/examples/plot_join_segmentations.py | 14 | 1967 | """
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.filters import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage.color import label2rgb
from skimage import data, img_as_float
coins = img_as_float(data.coins())
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30.0 / 255] = background
markers[coins > 150.0 / 255] = foreground
ws = watershed(edges, markers)
seg1 = ndi.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
seg2 = slic(coins, n_segments=117, max_iter=160, sigma=1, compactness=0.75,
multichannel=False)
# combine the two
segj = join_segmentations(seg1, seg2)
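# Added note (not in the original example): on tiny label arrays the join puts
# two pixels in the same segment only if both inputs agree, e.g.
# join_segmentations(np.array([0, 0, 1, 1]), np.array([0, 1, 1, 1]))
# relabels to [0, 1, 2, 2].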
# show the segmentations
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5))
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
color1 = label2rgb(seg1, image=coins, bg_label=0)
axes[1].imshow(color1, interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
color2 = label2rgb(seg2, image=coins, image_alpha=0.5)
axes[2].imshow(color2, interpolation='nearest')
axes[2].set_title('SLIC superpixels')
color3 = label2rgb(segj, image=coins, image_alpha=0.5)
axes[3].imshow(color3, interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
ax.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
plt.show()
| bsd-3-clause |
fako/datascope | src/core/utils/data/text_features.py | 1 | 5503 | import os
import pickle
from importlib import import_module
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from scipy.sparse import vstack, hstack, save_npz, load_npz
class TextContentReader(object):
def __init__(self, get_identifier, get_text, content_callable):
self.get_identifier = get_identifier
self.get_text = get_text
self.content_callable = content_callable
self.content = None
self.identifiers = None
def __iter__(self):
self.content = self.content_callable()
self.identifiers = []
return self
def __next__(self):
entry = next(self.content)
self.identifiers.append(self.get_identifier(entry))
return self.get_text(entry)
class TextFeaturesFrame(object):
def __init__(self, get_identifier, get_text, content=None, file_path=None, language="en"):
self.get_identifier = get_identifier
self.get_text = get_text
self.language = language
# Initialize attributes used by this class
self.raw_data = None
self.vectorizer = None
self.data = None
self.content = None
self.features = None
self.identifiers = None
self.count_dtype = np.int16
# Fill actual data frame with content
if file_path:
self.from_disk(file_path)
elif content:
self.reset(content=content)
def from_disk(self, file_path):
file_name, ext = os.path.splitext(file_path)
self.raw_data = load_npz(file_path)
with open(file_name + ".voc", "rb") as vocab_file:
self.vectorizer = pickle.load(vocab_file)
self.identifiers = pd.read_pickle(file_name + ".pkl")
self.load_features(self.vectorizer)
self.load_data(self.raw_data)
def to_disk(self, file_path):
file_name, ext = os.path.splitext(file_path)
save_npz(file_path, self.raw_data)
with open(file_name + ".voc", "wb") as vocab_file:
pickle.dump(self.vectorizer, vocab_file)
self.identifiers.to_pickle(file_name + ".pkl")
def get_vectorizer(self):
stop_words_module = import_module("spacy.lang.{}.stop_words".format(self.language))
stop_words = list(getattr(stop_words_module, 'STOP_WORDS'))
return CountVectorizer(stop_words=stop_words)
def load_data(self, raw_data):
transformer = TfidfTransformer()
self.data = transformer.fit_transform(raw_data).tocsc()
self.data /= self.data.max() # min-max normalisation across columns with min=0
def load_content(self, content_callable=None):
if not self.vectorizer:
self.vectorizer = self.get_vectorizer()
should_fit = True
else:
should_fit = False
content_reader = TextContentReader(self.get_identifier, self.get_text, content_callable or self.content)
matrix = self.vectorizer.fit_transform(content_reader).tocsc() if should_fit \
else self.vectorizer.transform(content_reader).tocsc()
# Update existing data and deduplicate on index
self.raw_data = vstack([self.raw_data, matrix]) if self.raw_data is not None else matrix
if self.identifiers is None:
self.identifiers = pd.Series(content_reader.identifiers)
else:
new = pd.Series(content_reader.identifiers)
self.identifiers = self.identifiers.append(new) \
.drop_duplicates(keep="last") \
.reset_index(drop=True)
# Converting the data to dok_matrix should deduplicate values
# See: https://stackoverflow.com/questions/28677162/ignoring-duplicate-entries-in-sparse-matrix
self.raw_data = self.raw_data.tocoo().todok().tocsc()
self.load_data(self.raw_data)
if should_fit:
self.load_features(self.vectorizer)
def load_features(self, vectorizer):
self.features = {
feature: ix
for ix, feature in enumerate(vectorizer.get_feature_names())
}
def reset(self, content):
self.raw_data = None
self.content = content
self.vectorizer = None
self.load_content(content)
def score_by_params(self, params):
matrix = None
vector = []
for key, value in self.clean_params(params).items():
col = self.data.getcol(self.features[key])
matrix = hstack([matrix, col]) if matrix is not None else col
vector.append(value)
if matrix is None:
return None
vector = np.array(vector)
values = matrix.dot(vector)
return pd.Series(values, index=self.identifiers)
def clean_params(self, params):
cleaned = {}
for key, value in params.items():
# First check valid values
if isinstance(value, float):
pass
elif isinstance(value, int) or isinstance(value, str) and value.isnumeric():
value = float(value)
else:
continue
# Then check collisions and feature register
stripped = key.strip("$")
if stripped not in self.features:
continue
if stripped in cleaned:
raise ValueError("Collision of keys while cleaning params for {} and {}".format(stripped, key))
cleaned[stripped] = value
return cleaned
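# ---------------------------------------------------------------------------
# Minimal usage sketch (added; not part of the original module). It assumes
# spaCy with the English stop word list is installed; the two records below
# are invented purely for illustration.
if __name__ == "__main__":
    def _demo_content():
        return iter([
            {"id": "a", "text": "the quick brown fox"},
            {"id": "b", "text": "jumps over the lazy dog"},
        ])

    demo_frame = TextFeaturesFrame(
        get_identifier=lambda entry: entry["id"],
        get_text=lambda entry: entry["text"],
        content=_demo_content,
    )
    # Score every document for the hypothetical query parameter "$quick$".
    print(demo_frame.score_by_params({"$quick$": 1}))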
| gpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 43 | 3449 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
net = tf.layers.dropout(net, rate=0.1)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op.
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Explosound/ExplosoundCluster | pymir/Frame.py | 2 | 6656 | """
Frame class
ndarray subclass for time-series data
Last updated: 31 January 2014
"""
import math
from math import *
import numpy
import numpy.fft
from numpy import *
from numpy.lib import stride_tricks
import scipy
import matplotlib.pyplot as plt
import pymir
from pymir import Spectrum, Transforms
import pyaudio
class Frame(numpy.ndarray):
def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
strides=None, order=None):
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to InfoArray.__array_finalize__
obj = numpy.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides,
order)
obj.sampleRate = 0
obj.channels = 1
obj.format = pyaudio.paFloat32
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(InfoArray, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. InfoArray():
# obj is None
# (we're in the middle of the InfoArray.__new__
# constructor, and self.info will be set when we return to
# InfoArray.__new__)
if obj is None: return
# From view casting - e.g arr.view(InfoArray):
# obj is arr
# (type(obj) can be InfoArray)
# From new-from-template - e.g infoarr[:3]
# type(obj) is InfoArray
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'info', because this
# method sees all creation of default objects - with the
# InfoArray.__new__ constructor, but also with
# arr.view(InfoArray).
self.sampleRate = getattr(obj, 'sampleRate', None)
self.channels = getattr(obj, 'channels', None)
self.format = getattr(obj, 'format', None)
# We do not need to return anything
#####################
# Frame methods
#####################
def cqt(self):
"""
Compute the Constant Q Transform (CQT)
"""
return Transforms.cqt(self)
def dct(self):
"""
Compute the Discrete Cosine Transform (DCT)
"""
return Transforms.dct(self)
def energy(self, windowSize = 256):
"""
Compute the energy of this frame
"""
N = len(self)
window = numpy.hamming(windowSize)
window.shape = (windowSize, 1)
n = N - windowSize #number of windowed samples.
# Create a view of the signal whose shape is (n, windowSize). Use stride_tricks such that each stride jumps only one item.
p = numpy.power(self,2)
s = stride_tricks.as_strided(p,shape=(n, windowSize), strides=(self.itemsize, self.itemsize))
e = numpy.dot(s, window) / windowSize
e.shape = (e.shape[0], )
return e
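# Added illustration (not part of the original class): with item-size strides,
# as_strided builds overlapping windows without copying; for a 5-sample signal
# and windowSize=3 the view rows are [p[0], p[1], p[2]] and [p[1], p[2], p[3]],
# i.e. each row is shifted by one sample.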
def frames(self, frameSize, windowFunction = None):
"""
Decompose this frame into smaller frames of size frameSize
"""
frames = []
start = 0
end = frameSize
while start < len(self):
if windowFunction == None:
frames.append(self[start:end])
else:
window = windowFunction(frameSize)
window.shape = (frameSize, 1)
window = numpy.squeeze(window)
frame = self[start:end]
if len(frame) < len(window):
# Zero pad
frameType = frame.__class__.__name__
sampleRate = frame.sampleRate
channels = frame.channels
format = frame.format
diff = len(window) - len(frame)
frame = numpy.append(frame, [0] * diff)
if frameType == "AudioFile":
frame = frame.view(pymir.AudioFile)
else:
frame = frame.view(Frame)
# Restore frame properties
frame.sampleRate = sampleRate
frame.channels = channels
frame.format = format
windowedFrame = frame * window
frames.append(windowedFrame)
start = start + frameSize
end = end + frameSize
return frames
def framesFromOnsets(self, onsets):
"""
Decompose into frames based on onset start time-series
"""
frames = []
for i in range(0, len(onsets) - 1):
frames.append(self[onsets[i] : onsets[i + 1]])
return frames
def play(self):
"""
Play this frame through the default playback device using pyaudio (PortAudio)
Note: This is a blocking operation.
"""
# Create the stream
p = pyaudio.PyAudio()
stream = p.open(format = self.format, channels = self.channels, rate = self.sampleRate, output = True)
# Write the audio data to the stream
audioData = self.tostring()
stream.write(audioData)
# Close the stream
stream.stop_stream()
stream.close()
p.terminate()
def plot(self):
"""
Plot the frame using matplotlib
"""
plt.plot(self)
plt.xlim(0, len(self))
plt.ylim(-1.5, 1.5)
plt.show()
def rms(self):
"""
Compute the root-mean-squared amplitude
"""
sum = 0
for i in range(0, len(self)):
sum = sum + self[i] ** 2
sum = sum / (1.0 * len(self))
return math.sqrt(sum)
# Spectrum
def spectrum(self):
"""
Compute the spectrum using an FFT
Returns an instance of Spectrum
"""
return Transforms.fft(self)
def zcr(self):
"""
Compute the Zero-crossing rate (ZCR)
"""
zcr = 0
for i in range(1, len(self)):
if (self[i - 1] * self[i]) < 0:
zcr = zcr + 1
return zcr / (1.0 * len(self))
 | mit |
Odingod/mne-python | examples/preprocessing/plot_define_target_events.py | 19 | 3350 | """
============================================================
Define target events based on time lag, plot evoked response
============================================================
This script shows how to define higher order events based on
time lag between reference and target events. For
illustration, we will put the presented face stimuli into two
classes, that is 1) followed by an early button press
(within 590 milliseconds) and 2) followed by a late button
press (later than 590 milliseconds). Finally, we will
visualize the evoked responses to both 'quickly-processed'
and 'slowly-processed' face stimuli.
"""
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.event import define_target_events
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads
# pick MEG channels
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
include=include, exclude='bads')
###############################################################################
# Find stimulus event followed by quick button presses
reference_id = 5 # presentation of a smiley face
target_id = 32 # button press
sfreq = raw.info['sfreq'] # sampling rate
tmin = 0.1 # trials leading to very early responses will be rejected
tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms
new_id = 42 # the new event id for a hit. If None, reference_id is used.
fill_na = 99 # the fill value for misses
events_, lag = define_target_events(events, reference_id, target_id,
sfreq, tmin, tmax, new_id, fill_na)
print(events_) # The 99 indicates missing or too late button presses
# besides the events also the lag between target and reference is returned
# this could e.g. be used as parametric regressor in subsequent analyses.
print(lag[lag != fill_na]) # lag in milliseconds
# #############################################################################
# Construct epochs
tmin_ = -0.2
tmax_ = 0.4
event_id = dict(early=new_id, late=fill_na)
epochs = mne.Epochs(raw, events_, event_id, tmin_,
tmax_, picks=picks, baseline=(None, 0),
reject=dict(mag=4e-12))
# average epochs and get an Evoked dataset.
early, late = [epochs[k].average() for k in event_id]
###############################################################################
# View evoked response
times = 1e3 * epochs.times # time in milliseconds
title = 'Evoked response followed by %s button press'
plt.clf()
ax = plt.subplot(2, 1, 1)
early.plot(axes=ax)
plt.title(title % 'early')
plt.ylabel('Evoked field (fT)')
ax = plt.subplot(2, 1, 2)
late.plot(axes=ax)
plt.title(title % 'late')
plt.ylabel('Evoked field (fT)')
plt.show()
| bsd-3-clause |
andnovar/ggplot | ggplot/tests/test_legend.py | 12 | 5181 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import (assert_true, assert_raises, assert_is,
assert_is_not, assert_equal)
from ggplot import *
import six
import pandas as pd
from ggplot.components import assign_visual_mapping
from ggplot.utils.exceptions import GgplotError
def get_test_df():
df = pd.DataFrame({
'xmin': [1, 3, 5],
'xmax': [2, 3.5, 7],
'ymin': [1, 4, 6],
'ymax': [5, 5, 9],
'fill': ['blue', 'red', 'green'],
'quality': ['good', 'bad', 'ugly'],
'alpha': [0.1, 0.5, 0.9],
'texture': ['hard', 'soft', 'medium']})
return df
def test_legend_structure():
df = get_test_df()
gg = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df, gg.aesthetics, gg)
# All mapped aesthetics must have an entry in the legend
for aesthetic in ('color', 'fill', 'alpha', 'linetype'):
assert(aesthetic in legend)
# None of the unassigned aesthetic should have an entry in the legend
assert('size' not in legend)
assert('shape' not in legend)
# legend entries should remember the column names
# to which they were mapped
assert(legend['fill']['column_name'] == 'fill')
assert(legend['color']['column_name'] == 'quality')
assert(legend['linetype']['column_name'] == 'texture')
assert(legend['alpha']['column_name'] == 'alpha')
# Discrete columns for non-numeric data
assert(legend['fill']['scale_type'] == 'discrete')
assert(legend['color']['scale_type'] == 'discrete')
assert(legend['linetype']['scale_type'] == 'discrete')
assert(legend['alpha']['scale_type'] == 'discrete')
# Alternate
df2 = pd.DataFrame.copy(df)
df2['fill'] = [90, 3.2, 8.1]
gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df2, gg.aesthetics, gg)
assert(legend['fill']['scale_type'] == 'discrete')
# Test if legend switches to continuous for more than 8 numerical values
df3 = pd.DataFrame({
'xmin': [1, 3, 5, 8, 2, 1, 4, 7, 9],
'xmax': [2, 3.5, 7, 12, 3, 2, 6, 8, 10],
'ymin': [1, 4, 6, 0, 0, 0, 0, 0, 0],
'ymax': [5, 5, 9, 1, 1, 1, 1, 1, 1],
'fill': ['blue', 'red', 'green', 'green', 'green',
'green', 'green', 'green', 'brown'],
'quality': ['good', 'bad', 'ugly', 'horrible', 'quite awful',
'impertinent', 'jolly', 'hazardous', 'ok'],
'alpha': [0.1, 0.2, 0.4, 0.5, 0.6, 0.65, 0.8, 0.82, 0.83],
'texture': ['hard', 'soft', 'medium', 'fluffy', 'slimy', 'rough',
'edgy', 'corny', 'slanted']
})
gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df3, gg.aesthetics, gg)
assert(legend['alpha']['scale_type'] == 'continuous')
# Test if legend raises GgplotError when size and alpha are fed non-numeric data
gg = ggplot(df3, aes(size="fill"))
assert_raises(GgplotError, assign_visual_mapping, df3, gg.aesthetics, gg)
gg = ggplot(df3, aes(alpha="fill"))
assert_raises(GgplotError, assign_visual_mapping, df3, gg.aesthetics, gg)
@cleanup
def test_alpha_rect():
df = get_test_df()
p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
p += geom_rect(size=5)
assert_same_ggplot(p, "legend_alpha_rect")
@cleanup
def test_alpha():
diamonds["test"] = diamonds["clarity"].map(len)
p = ggplot(diamonds[::50], aes(x='carat', y='price', colour='test',
size='test', alpha='test'))
#p = ggplot(diamonds[1:60000:50], aes(x='carat', y='price', shape='clarity'))
p = p + geom_point() + ggtitle("Diamonds: A Plot")
p = p + xlab("Carat") + ylab("Price")
assert_same_ggplot(p, "legend_alpha")
@cleanup
def test_linetype():
meat_lng = pd.melt(meat[['date', 'beef', 'pork', 'broilers']], id_vars='date')
p = ggplot(aes(x='date', y='value', colour='variable',
linetype='variable', shape='variable'), data=meat_lng) + \
geom_line() + geom_point() +\
ylim(0, 3000)
assert_same_ggplot(p, "legend_linetype")
@cleanup
def test_shape_alpha():
diamonds["test"] = diamonds["clarity"].map(len)
df = diamonds[::50]
p = ggplot(df, aes(x='carat', y='price', colour='test', size='test',
alpha='test', shape='clarity')) + geom_point()
assert_same_ggplot(p, "legend_shape_alpha")
| bsd-2-clause |
moutai/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
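# NOTE: the unconditional return below disables signature output; the branch
# after it is never reached.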
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
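# Added usage sketch (not part of the original module): rendering an object's
# docstring as Sphinx-flavoured reST, e.g.
#   import numpy as np
#   print(get_doc_object(np.take))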
| bsd-3-clause |
pydata/pandas-gbq | docs/source/conf.py | 1 | 11167 | # -*- coding: utf-8 -*-
#
# pandas-gbq documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 8 10:52:12 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import datetime
import os
import sys
import pandas_gbq
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.todo",
"numpydoc", # used to parse numpy-style docstrings for autodoc
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"pandas-gbq"
copyright = u"2017-{}, PyData Development Team".format(
datetime.datetime.now().year
)
author = u"PyData Development Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pandas_gbq.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Taken from docs.readthedocs.io:
# on_rtd is whether we are on readthedocs.io
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'pandas-gbq v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pandas-gbqdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pandas-gbq.tex",
u"pandas-gbq Documentation",
u"PyData Development Team",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "pandas-gbq", u"pandas-gbq Documentation", [author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pandas-gbq",
u"pandas-gbq Documentation",
author,
"pandas-gbq",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Configuration for intersphinx:
intersphinx_mapping = {
"https://docs.python.org/": None,
"https://pandas.pydata.org/pandas-docs/stable/": None,
"https://pydata-google-auth.readthedocs.io/en/latest/": None,
"https://google-auth.readthedocs.io/en/latest/": None,
}
extlinks = {
"issue": ("https://github.com/pydata/pandas-gbq/issues/%s", "GH#"),
"pr": ("https://github.com/pydata/pandas-gbq/pull/%s", "GH#"),
}
| bsd-3-clause |
TomAugspurger/pandas | pandas/io/pickle.py | 1 | 6553 | """ pickle compat """
import pickle
from typing import Any, Optional
import warnings
from pandas._typing import FilePathOrBuffer
from pandas.compat import pickle_compat as pc
from pandas.io.common import get_filepath_or_buffer, get_handle
def to_pickle(
obj: Any,
filepath_or_buffer: FilePathOrBuffer,
compression: Optional[str] = "infer",
protocol: int = pickle.HIGHEST_PROTOCOL,
):
"""
Pickle (serialize) object to file.
Parameters
----------
obj : any object
Any python object.
filepath_or_buffer : str, path object or file-like object
File path, URL, or buffer where the pickled object will be stored.
.. versionchanged:: 1.0.0
Accept URL. URL has to be of S3 or GCS.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
If 'infer' and 'path_or_url' is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression) If 'infer' and 'path_or_url' is not path-like, then use
None (= no decompression).
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For Python
2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value.
For Python >= 3.4, 4 is a valid value. A negative value for the
protocol parameter is equivalent to setting its value to
HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
filepath_or_buffer, compression=compression, mode="wb"
)
if not isinstance(fp_or_buf, str) and compression == "infer":
compression = None
f, fh = get_handle(fp_or_buf, "wb", compression=compression, is_text=False)
if protocol < 0:
protocol = pickle.HIGHEST_PROTOCOL
try:
f.write(pickle.dumps(obj, protocol=protocol))
finally:
f.close()
for _f in fh:
_f.close()
if should_close:
try:
fp_or_buf.close()
except ValueError:
pass
def read_pickle(
filepath_or_buffer: FilePathOrBuffer, compression: Optional[str] = "infer"
):
"""
Load pickled pandas object (or any object) from file.
.. warning::
Loading pickled data received from untrusted sources can be
unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
File path, URL, or buffer where the pickled object will be loaded from.
.. versionchanged:: 1.0.0
Accept URL. URL is not limited to S3 and GCS.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
If 'infer' and 'path_or_url' is path-like, then detect compression from
the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression) If 'infer' and 'path_or_url' is not path-like, then use
None (= no decompression).
Returns
-------
unpickled : same type as object stored in file
See Also
--------
DataFrame.to_pickle : Pickle (serialize) DataFrame object to file.
Series.to_pickle : Pickle (serialize) Series object to file.
read_hdf : Read HDF5 file into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
read_parquet : Load a parquet object, returning a DataFrame.
Notes
-----
read_pickle is only guaranteed to be backwards compatible to pandas 0.20.3.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> pd.to_pickle(original_df, "./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
filepath_or_buffer, compression=compression
)
if not isinstance(fp_or_buf, str) and compression == "infer":
compression = None
f, fh = get_handle(fp_or_buf, "rb", compression=compression, is_text=False)
# 1) try standard library Pickle
# 2) try pickle_compat (older pandas version) to handle subclass changes
# 3) try pickle_compat with latin-1 encoding upon a UnicodeDecodeError
try:
excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError)
# TypeError for Cython complaints about object.__new__ vs Tick.__new__
try:
with warnings.catch_warnings(record=True):
# We want to silence any warnings about, e.g. moved modules.
warnings.simplefilter("ignore", Warning)
return pickle.load(f)
except excs_to_catch:
# e.g.
# "No module named 'pandas.core.sparse.series'"
# "Can't get attribute '__nat_unpickle' on <module 'pandas._libs.tslib"
return pc.load(f, encoding=None)
except UnicodeDecodeError:
# e.g. can occur for files written in py27; see GH#28645 and GH#31988
return pc.load(f, encoding="latin-1")
finally:
f.close()
for _f in fh:
_f.close()
if should_close:
try:
fp_or_buf.close()
except ValueError:
pass
| bsd-3-clause |
kubeflow/kfp-tekton | components/XGBoost/Predict/from_ApacheParquet/component.py | 1 | 1562 | from kfp.components import InputPath, OutputPath, create_component_from_func
def xgboost_predict(
data_path: InputPath('ApacheParquet'),
model_path: InputPath('XGBoostModel'),
predictions_path: OutputPath('Text'),
label_column_name: str = None,
):
'''Make predictions using a trained XGBoost model.
Args:
data_path: Path for the feature data in Apache Parquet format.
model_path: Path for the trained model in binary XGBoost format.
predictions_path: Output path for the predictions.
label_column_name: Optional. Name of the column containing the label data that is excluded during the prediction.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
import numpy
import pandas
import xgboost
# Loading data
df = pandas.read_parquet(data_path)
if label_column_name:
df = df.drop(columns=[label_column_name])
evaluation_data = xgboost.DMatrix(
data=df,
)
# Load the trained model and run prediction
model = xgboost.Booster(model_file=model_path)
predictions = model.predict(evaluation_data)
Path(predictions_path).parent.mkdir(parents=True, exist_ok=True)
numpy.savetxt(predictions_path, predictions)
if __name__ == '__main__':
create_component_from_func(
xgboost_predict,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=[
'xgboost==1.1.1',
'pandas==1.0.5',
'pyarrow==0.17.1',
]
)
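# Added note (not part of the original component definition): the generated
# component.yaml can then be loaded into a pipeline, e.g.
#   import kfp.components as comp
#   xgboost_predict_op = comp.load_component_from_file('component.yaml')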
| apache-2.0 |
clemkoa/scikit-learn | examples/mixture/plot_gmm_pdf.py | 140 | 1521 | """
=========================================
Density Estimation for a Gaussian mixture
=========================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20., 30.)
y = np.linspace(-20., 40.)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
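# Possible follow-up (sketch, not part of the original example): the fitted
# mixture can also draw new points from the estimated density, e.g.
#   X_new, component_labels = clf.sample(n_samples=100)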
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/mixture/plot_gmm_classifier.py | 7 | 3893 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import pylab as pl
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
pl.figure(figsize=(3 * n_classifiers / 2, 6))
pl.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = pl.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
pl.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
pl.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
pl.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
pl.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
pl.xticks(())
pl.yticks(())
pl.title(name)
pl.legend(loc='lower right', prop=dict(size=12))
pl.show()
| bsd-3-clause |
drusk/pml | setup.py | 1 | 2075 | # Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Project setup.
@author: drusk
"""
from setuptools import setup
setup(
name="pml",
version="0.0.1",
author="David Rusk",
author_email="[email protected]",
packages=["pml", "pml.interactive", "pml.data", "pml.supervised",
"pml.supervised.decision_trees", "pml.unsupervised",
"pml.tools", "pml.utils"],
# Allows sample data to be loaded in shell
package_data={"pml.interactive": ["sample_data/*"]},
scripts=["scripts/pml"],
url="http://github.com/drusk/pml",
license="LICENSE",
description="Simple interface to Python machine learning algorithms.",
long_description=open("README.rst").read(),
install_requires=[
"ipython >= 0.11",
"pandas >= 0.14.1",
"matplotlib",
"numpy >= 1.6.1",
]
)
| mit |
PeterHuang2015/BITDM | python/kaggle/Titanic/myfirstforest.py | 26 | 4081 | """ Writing my first randomforest code.
Author : AstroDave
Date : 23rd September 2012
Revised: 15 April 2014
please see packages.python.org/milk/randomforests.html for more
"""
import pandas as pd
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('train.csv', header=0) # Load the train file into a dataframe
# I need to convert all strings to integer classifiers.
# I need to fill in the missing values of the data and make it complete.
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc.
# All missing Embarked -> just make them embark from most common place
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
train_df.Embarked[ train_df.Embarked.isnull() ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('test.csv', header=0) # Load the test file into a dataframe
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
test_df.Embarked[ test_df.Embarked.isnull() ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
print 'Training...'
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
print 'Predicting...'
output = forest.predict(test_data).astype(int)
predictions_file = open("myfirstforest.csv", "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
predictions_file.close()
print 'Done.'
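# Possible sanity check before submitting (sketch, not in the original script):
#   from sklearn import cross_validation
#   cv_scores = cross_validation.cross_val_score(
#       forest, train_data[0::,1::], train_data[0::,0], cv=5)
#   print 'CV accuracy: %.3f' % cv_scores.mean()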
| mit |
jpopham91/berserker | berserker/estimators/meta.py | 1 | 1349 | __author__ = 'jake'
from sklearn.base import BaseEstimator
import numpy as np
class Averager(BaseEstimator):
"""
Simple meta-estimator which averages predictions
May use any of the pythagorean means
"""
class StepwiseRegressor(Averager):
"""
An averager which iteratively adds predictions which optimize a metric
"""
class FeatureWeightedEstimator(BaseEstimator):
"""
Expands the feature space by taking the outer product of the features and predictions at each sample
This is then fit using some estimator (log/lin regression)
"""
def __init__(self, estimator):
self.estimator = estimator
@staticmethod
def _combine_features(X, y_pred):
        # Allocate one column per (feature, prediction) product for each sample
        Xy = np.empty((X.shape[0], X.shape[1]*y_pred.shape[1]))
        for i in range(X.shape[1]):
            for j in range(y_pred.shape[1]):
                Xy[:, i*y_pred.shape[1]+j] = X[:, i]*y_pred[:, j]
return Xy
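    # An equivalent vectorized form of the loop above (sketch, assuming 2-D
    # numpy arrays for X and y_pred):
    #   Xy = (X[:, :, None] * y_pred[:, None, :]).reshape(X.shape[0], -1)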
def fit(self, X, y_pred, y_true):
"""Takes the feature vectors AND predictions as training data"""
assert X.shape[0] == y_pred.shape[0] == len(y_true)
Xy = self._combine_features(X, y_pred)
self.estimator.fit(Xy, y_true)
def predict(self, X, y_pred):
assert X.shape[0] == y_pred.shape[0]
Xy = self._combine_features(X, y_pred)
        return self.estimator.predict(Xy) | mit |
evanbiederstedt/RRBSfun | scripts/PDR_GenomicRegions_mcell67_88.py | 1 | 7713 |
import glob
import pandas as pd
import numpy as np
import os
os.chdir('/Users/evanbiederstedt/Downloads/annoOld_files')
# set glob subdirectory via cell batch
normalB_cellbatch_mcell = glob.glob("RRBS_NormalBCD19pCD27mcell67_88*.anno")
newdf1 = pd.DataFrame()
for filename in normalB_cellbatch_mcell:
df = pd.read_table(filename)
df['filename'] = str(filename)
df = df.drop(['chr', 'start', 'strand', 'thisMeth', 'thisUnmeth', 'avgWeightedEnt', 'CpGEntropy',
'avgReadCpGs', 'tss', 'tssDistance', 'genes', 'exons', 'introns', 'promoter', 'cgi',
'geneDensity', 'ctcfUpstream', 'ctcfDownstream','ctcfDensity', 'geneDistalRegulatoryModules',
'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance','3PrimeUTRDistance',
'5PrimeUTR', '5PrimeUTRDistance', 'firstExon','geneDistalRegulatoryModulesK562',
'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance'], axis=1)
df['total_reads'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df['totreads_genesDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['genesDistance']<0, 0)
df['totreads_exonsDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['exonsDistance']<0, 0)
df['totreads_intronsDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['intronsDistance']<0, 0)
df['totreads_promoterDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['promoterDistance']<0, 0)
df['totreads_cgiDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['cgiDistance']<0, 0)
df['totreads_ctcfDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['ctcfDistance']<0, 0)
df['totreads_geneDistalRegulatoryModulesDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['geneDistalRegulatoryModulesDistance']<0, 0)
df['totreads_firstExonDistance'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1).where(df['firstExonDistance']<0, 0)
df['mixedReads_genesDistance'] = np.where(df['genesDistance']<0, df['mixedReadCount'], 0)
df['mixedReads_exonsDistance'] = np.where(df['exonsDistance']<0, df['mixedReadCount'], 0)
df['mixedReads_intronsDistance'] = np.where(df['intronsDistance']<0, df['mixedReadCount'], 0)
df['mixedReads_promoterDistance'] = np.where(df['promoterDistance']<0, df['mixedReadCount'], 0)
df['mixedReads_cgiDistance'] = np.where(df['cgiDistance']<0, df['mixedReadCount'], 0)
df['mixedReads_ctcfDistance'] = np.where(df['ctcfDistance']<0, df['mixedReadCount'], 0)
df['mixedReads_geneDistalRegulatoryModulesDistance'] = np.where(df['geneDistalRegulatoryModulesDistance'] <0, df['mixedReadCount'], 0)
df['mixedReads_vistaEnhancersDistance'] = np.where(df['vistaEnhancersDistance'] <0, df['mixedReadCount'], 0)
df['mixedReads_firstExonDistance'] = np.where(df['firstExonDistance'] <0, df['mixedReadCount'], 0)
df['fullMethReads_genesDistance'] = np.where(df['genesDistance']<0, df['methReadCount'], 0)
df['fullMethReads_exonsDistance'] = np.where(df['exonsDistance']<0, df['methReadCount'], 0)
df['fullMethReads_intronsDistance'] = np.where(df['intronsDistance']<0, df['methReadCount'], 0)
df['fullMethReads_promoterDistance'] = np.where(df['promoterDistance']<0, df['methReadCount'], 0)
df['fullMethReads_cgiDistance'] = np.where(df['cgiDistance']<0, df['methReadCount'], 0)
df['fullMethReads_ctcfDistance'] = np.where(df['ctcfDistance']<0, df['methReadCount'], 0)
df['fullMethReads_geneDistalRegulatoryModulesDistance'] = np.where(df['geneDistalRegulatoryModulesDistance'] <0, df['methReadCount'], 0)
df['fullMethReads_vistaEnhancersDistance'] = np.where(df['vistaEnhancersDistance'] <0, df['methReadCount'], 0)
df['fullMethReads_firstExonDistance'] = np.where(df['firstExonDistance'] <0, df['methReadCount'], 0)
df = df.sum()
df['filename'] = str(filename)
df['PDR_total'] = df['mixedReadCount']/df['total_reads']
df['PDR_GenesBody'] = df['mixedReads_genesDistance']/df['totreads_genesDistance']
df['PDR_Exons'] = df['mixedReads_exonsDistance']/df['totreads_exonsDistance']
df['PDR_Introns'] = df['mixedReads_intronsDistance']/df['totreads_intronsDistance']
df['PDR_Promoters'] = df['mixedReads_promoterDistance']/df['totreads_promoterDistance']
df['PDR_CGIslands'] = df['mixedReads_cgiDistance']/df['totreads_cgiDistance']
df['PDR_CTCF'] = df['mixedReads_ctcfDistance']/df['totreads_ctcfDistance']
df['PDR_Enhancer'] = df['mixedReads_geneDistalRegulatoryModulesDistance']/df['totreads_geneDistalRegulatoryModulesDistance']
df['percent_totalMeth'] = df['methReadCount']/df['total_reads']
df['totalMeth_GenesBody'] = df['fullMethReads_genesDistance']/df['totreads_genesDistance']
df['totalMeth_Exons'] = df['fullMethReads_exonsDistance']/df['totreads_exonsDistance']
df['totalMeth_Introns'] = df['fullMethReads_intronsDistance']/df['totreads_intronsDistance']
df['totalMeth_Promoters'] = df['fullMethReads_promoterDistance']/df['totreads_promoterDistance']
df['totalMeth_CGIslands'] = df['fullMethReads_cgiDistance']/df['totreads_cgiDistance']
df['totalMeth_CTCF'] = df['fullMethReads_ctcfDistance']/df['totreads_ctcfDistance']
df['totalMeth_Enhancer'] = df['fullMethReads_geneDistalRegulatoryModulesDistance']/df['totreads_geneDistalRegulatoryModulesDistance']
newdf1 = newdf1.append(df, ignore_index=True)
# export as .csv
newdf1.to_csv('PDR_genomicRegions_RRBS_NormalBCD19pCD27mcell67_88.csv')
################################################
################################################
#
# NOTE: We need to recalculate for annotations
#
# Dataframe headers versus labels in Landau et al (2014)
#
# 'tssDistance' // always seems to be a zero value---why?
# 'genesDistance' = 'Genes Body'
# 'exonsDistance' = 'Exons'
# 'intronsDistance' = 'Introns'
# 'promoterDistance' = 'Promoters'
# 'cgiDistance' = 'CG Islands'
# 'ctcfDistance' = 'CTCF binding site density'
# 'ctcfUpDistance'
# 'ctcfDownDistance'
# 'geneDistalRegulatoryModulesDistance' = 'Enhancer'
# 'vistaEnhancersDistance' = // ignore
# 'firstExonDistance'
#
###############
# QUESTIONS
###############
#
# Question (1) Calculating GCI shores and shelves is very tricky, as one must know the exact GCI boundaries
# e.g.
# if GCI distance is -1, this is included in GCIshore up [0 to 2000]
# if GCI distance is -2001, this is included in GCIshelf up [2000 to 4000]
#
# One cannot do this:
# df['GCIshoreUp'] = df['cgiDistance'] + 2000
# df['GCIshoreDown'] = df['cgiDistance'] - 2000
# df['GCIshelfUp'] = df['cgiDistance'] + 4000
# df['GCIshelfDown'] = df['cgiDistance'] - 4000
# as you are using 'cgiDistance' to be both the left boundary and the right boundary
#
# Question (2) How to calculate "Intergenic"?
#
# Question (3) What's up with 'tssDistance'?
#
| mit |
RobertABT/heightmap | build/scipy/doc/sphinxext/numpydoc/tests/test_docscrape.py | 39 | 18326 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N, N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert_equal(arg, 'list of str')
assert_equal(arg_type, '')
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
| mit |
alphacsc/alphacsc | examples/csc/plot_cross_frequency_coupling.py | 1 | 3801 | """
==================================================================
Extracting cross-frequency coupling waveforms from rodent LFP data
==================================================================
This example illustrates how to learn univariate atoms on a univariate
time series. The data is a single LFP channel recorded on a rodent's striatum
[1]_. Interestingly, in this time series the high frequency oscillations around
80 Hz are modulated in amplitude by the low-frequency oscillation around 3 Hz,
a phenomenon known as cross-frequency coupling (CFC).
The convolutional sparse coding (CSC) model is able to learn the prototypical
waveforms of the signal, on which we can clearly see the CFC.
.. [1] G. Dallérac, M. Graupner, J. Knippenberg, R. C. R. Martinez,
T. F. Tavares, L. Tallot, N. El Massioui, A. Verschueren, S. Höhn,
J.B. Bertolus, et al. Updating temporal expectancy of an aversive event
engages striatal plasticity under amygdala control.
Nature Communications, 8:13920, 2017
"""
# Authors: Tom Dupre La Tour <[email protected]>
# Mainak Jas <[email protected]>
# Umut Simsekli <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
###############################################################################
# Let us first load the data sample.
import mne
import numpy as np
import matplotlib.pyplot as plt
# sample frequency
sfreq = 350.
# We load the signal. It is an LFP channel recorded on a rodent's striatum.
data = np.load('../rodent_striatum.npy')
print(data.shape)
###############################################################################
# As the data contains severe artifacts between t=0 and t=100, we use a
# section not affected by artifacts.
data = data[:, 35000:]
# We also remove the slow drift, which accounts for a lot of variance.
data = mne.filter.filter_data(data, sfreq, 1, None)
# To make the most of parallel computing, we split the data into trials.
data = data.reshape(50, -1)
data /= data.std()
###############################################################################
# This sample contains CFC between 3 Hz and 80 Hz. This phenomenon can be
# described with a comodulogram, computed for instance with the `pactools
# <http://pactools.github.io/>`_ Python library.
from pactools import Comodulogram
comod = Comodulogram(fs=sfreq, low_fq_range=np.arange(0.2, 10.2, 0.2),
low_fq_width=2., method='duprelatour')
comod.fit(data)
comod.plot()
plt.show()
###############################################################################
# We fit a CSC model on the data.
from alphacsc import learn_d_z
params = dict(
n_atoms=3,
n_times_atom=int(sfreq * 1.0), # 1000. ms
reg=5.,
n_iter=10,
solver_z='l-bfgs',
solver_z_kwargs=dict(factr=1e9),
solver_d_kwargs=dict(factr=1e2),
random_state=42,
n_jobs=5,
verbose=1)
_, _, d_hat, z_hat, _ = learn_d_z(data, **params)
###############################################################################
# Plot the temporal patterns. Interestingly, we obtain prototypical
# waveforms of the signal on which we can clearly see the CFC.
n_atoms, n_times_atom = d_hat.shape
n_columns = min(6, n_atoms)
n_rows = int(np.ceil(n_atoms // n_columns))
figsize = (4 * n_columns, 3 * n_rows)
fig, axes = plt.subplots(n_rows, n_columns, figsize=figsize, sharey=True)
axes = axes.ravel()
for kk in range(n_atoms):
ax = axes[kk]
time = np.arange(n_times_atom) / sfreq
ax.plot(time, d_hat[kk], color='C%d' % kk)
ax.set_xlim(0, n_times_atom / sfreq)
ax.set(xlabel='Time (sec)', title="Temporal pattern %d" % kk)
ax.grid(True)
fig.tight_layout()
plt.show()
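# Possible follow-up (sketch; shape assumption about alphacsc's output): z_hat
# holds the sparse activations of each atom, roughly of shape
# (n_atoms, n_trials, n_times_valid), so a quick per-atom usage summary is e.g.
#   print(z_hat.sum(axis=(1, 2)))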
| bsd-3-clause |
yinzhao0312/Position-predict | yin/ensemble/Mposi_predict.py | 1 | 3869 | # coding = utf-8
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
import pandas as pd
import numpy as np
import codecs
import os
from yin.data_deal import common_data_deal as dl
import pickle
# import xgboost as xgb
path = os.path.dirname(__file__) + '/predictFile/'
outFile1 = codecs.open(path + 'posi_rf.txt', 'w', 'utf-8')
outFile2 = codecs.open(path + 'posi_gbdt.txt', 'w', 'utf-8')
outPkl1 = file(path + 'rfPosiProb.pkl', 'w')
outPkl2 = file(path + 'gbdtPosiProb.pkl', 'w')
inPklFile = file('feature_pkl.pkl', 'r')
trainFeatureAll, testFeatureAll = pickle.load(inPklFile)
# answer = pickle.load(answerFile)
feature_num = [str(i) for i in range(40)]
feature_str = ['age', 'gender', 'comSize1', 'comSize3', 'comSizeN', 'salary1', 'salary3',
'time1', 'time2', 'time3', 'time4', 'yearN', 'firstAge', 'posi1List',
'lengthList', 'lv1', 'lv3', 'aver1', 'aver3', 'year2',
'industry1Num', 'industry3Num', 'salaryLv1List', 'salaryLv3List',
'yearSalary1List', 'yearSalary3List'] + feature_num
def merge_feature(all_feature):
train_feature = {}
# testFeature = {}
for string in all_feature:
train_feature[string] = trainFeatureAll[string]
# testFeature[str] = testFeatureAll[str]
train_feature = pd.DataFrame(train_feature)
# testFeature = pd.DataFrame(testFeature)
train_id_list = trainFeatureAll['id']
# testIdList = testFeatureAll['id']
train_tar_list = trainFeatureAll['posi2List']
return train_feature, train_id_list, train_tar_list
if __name__ == '__main__':
trainFeatureR, trainIdListR, trainTarListR = merge_feature(feature_str)
tmp = [t < 32 for t in trainTarListR]
tmp = np.array(tmp)
trainFeatureR = trainFeatureR[tmp]
targetR = np.array(trainTarListR)
targetR = targetR[tmp]
trainIdListR = np.array(trainIdListR)
trainIdListR = trainIdListR[tmp]
Cfeature = trainFeatureR.columns[:]
tt = []
rfPro, gbPro = [], []
tmp = []
for i in range(len(trainFeatureR)):
tt.append(i % 5)
i = 4
tmp1 = np.array([t != i for t in tt])
tmp2 = np.array([t == i for t in tt])
trainFeature, testFeature = trainFeatureR[tmp1], trainFeatureR[tmp2]
trainTar, testTar = targetR[tmp1], targetR[tmp2]
trainId, testId = trainIdListR[tmp1], trainIdListR[tmp2]
clf = RandomForestClassifier(n_estimators=200, min_samples_split=17)
clf.fit(trainFeature[Cfeature], trainTar)
preds = clf.predict(testFeature)
predPro = clf.predict_proba(testFeature)
rfPro = predPro
right = 0
for n in range(len(preds)):
preName = dl.get_num_position(preds[n])
real = dl.get_num_position(testTar[n])
if preName == real:
right += 1.0
outFile1.write(str(testId[n]) + '\t' + preName + '\t' + real + '\n')
print right / (len(trainFeatureR) / 5.0)
pickle.dump(rfPro, outPkl1)
i = 4
print i
tmp1 = np.array([t != i for t in tt])
tmp2 = np.array([t == i for t in tt])
trainFeature, testFeature = trainFeatureR[tmp1], trainFeatureR[tmp2]
trainTar, testTar = targetR[tmp1], targetR[tmp2]
trainId, testId = trainIdListR[tmp1], trainIdListR[tmp2]
clf = GradientBoostingClassifier(n_estimators=6, min_samples_split=17)
clf.fit(trainFeature[Cfeature], trainTar)
preds = clf.predict(testFeature)
predPro = clf.predict_proba(testFeature)
gbPro = predPro
    right = 0
    for n in range(len(preds)):
preName = dl.get_num_position(preds[n])
real = dl.get_num_position(testTar[n])
if preName == real:
right += 1.0
        outFile2.write(str(testId[n]) + '\t' + preName + '\t' + real + '\n')
print right / (70000 / 5.0)
pickle.dump(predPro, outPkl2)
outPkl1.close()
outPkl2.close()
outFile1.close()
outFile2.close()
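    # Possible blending step (sketch, assumption): both probability matrices
    # were produced on the same i=4 test split, so a simple soft-vote ensemble is
    #   ensemblePro = 0.5 * np.array(rfPro) + 0.5 * np.array(gbPro)
    #   ensembleLabels = clf.classes_[ensemblePro.argmax(axis=1)]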
| gpl-2.0 |
waterponey/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
joegomes/deepchem | deepchem/models/multitask.py | 1 | 6044 | """
Convenience class that lets singletask models fit on multitask data.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sklearn
import tempfile
import numpy as np
import shutil
from deepchem.utils.save import log
from deepchem.models import Model
from deepchem.data import DiskDataset
from deepchem.trans import undo_transforms
class SingletaskToMultitask(Model):
"""
Convenience class to let singletask models be fit on multitask data.
Warning: This current implementation is only functional for sklearn models.
"""
def __init__(self, tasks, model_builder, model_dir=None, verbose=True):
self.tasks = tasks
if model_dir is not None:
if not os.path.exists(model_dir):
os.makedirs(model_dir)
else:
model_dir = tempfile.mkdtemp()
self.model_dir = model_dir
self.task_model_dirs = {}
self.model_builder = model_builder
    self.verbose = verbose
log("About to initialize singletask to multitask model", self.verbose)
for task in self.tasks:
task_model_dir = os.path.join(self.model_dir, str(task))
if not os.path.exists(task_model_dir):
os.makedirs(task_model_dir)
log("Initializing directory for task %s" % task, self.verbose)
self.task_model_dirs[task] = task_model_dir
def _create_task_datasets(self, dataset):
"""Make directories to hold data for tasks"""
task_data_dirs = []
for task in self.tasks:
task_data_dir = os.path.join(self.model_dir, str(task) + "_data")
if os.path.exists(task_data_dir):
shutil.rmtree(task_data_dir)
os.makedirs(task_data_dir)
task_data_dirs.append(task_data_dir)
task_datasets = self._to_singletask(dataset, task_data_dirs)
for task, task_dataset in zip(self.tasks, task_datasets):
log("Dataset for task %s has shape %s"
% (task, str(task_dataset.get_shape())), self.verbose)
return task_datasets
@staticmethod
def _to_singletask(dataset, task_dirs):
"""Transforms a multitask dataset to a collection of singletask datasets."""
tasks = dataset.get_task_names()
assert len(tasks) == len(task_dirs)
log("Splitting multitask dataset into singletask datasets", dataset.verbose)
task_datasets = [DiskDataset.create_dataset([], task_dirs[task_num], [task])
for (task_num, task) in enumerate(tasks)]
#task_metadata_rows = {task: [] for task in tasks}
for shard_num, (X, y, w, ids) in enumerate(dataset.itershards()):
log("Processing shard %d" % shard_num, dataset.verbose)
basename = "dataset-%d" % shard_num
for task_num, task in enumerate(tasks):
log("\tTask %s" % task, dataset.verbose)
w_task = w[:, task_num]
y_task = y[:, task_num]
# Extract those datapoints which are present for this task
X_nonzero = X[w_task != 0]
num_datapoints = X_nonzero.shape[0]
y_nonzero = np.reshape(y_task[w_task != 0], (num_datapoints, 1))
w_nonzero = np.reshape(w_task[w_task != 0], (num_datapoints, 1))
ids_nonzero = ids[w_task != 0]
task_datasets[task_num].add_shard(X_nonzero, y_nonzero, w_nonzero,
ids_nonzero)
return task_datasets
def fit(self, dataset, **kwargs):
"""
Updates all singletask models with new information.
Warning: This current implementation is only functional for sklearn models.
"""
if not isinstance(dataset, DiskDataset):
raise ValueError('SingletaskToMultitask only works with DiskDatasets')
log("About to create task-specific datasets", self.verbose)
task_datasets = self._create_task_datasets(dataset)
for ind, task in enumerate(self.tasks):
log("Fitting model for task %s" % task, self.verbose)
task_model = self.model_builder(
self.task_model_dirs[task])
task_model.fit(task_datasets[ind], **kwargs)
task_model.save()
def predict_on_batch(self, X):
"""
Concatenates results from all singletask models.
"""
n_tasks = len(self.tasks)
n_samples = X.shape[0]
y_pred = np.zeros((n_samples, n_tasks))
for ind, task in enumerate(self.tasks):
task_model = self.model_builder(self.task_model_dirs[task])
task_model.reload()
y_pred[:, ind] = task_model.predict_on_batch(X)
return y_pred
def predict(self, dataset, transformers=[]):
"""
Prediction for multitask models.
"""
n_tasks = len(self.tasks)
n_samples = len(dataset)
y_pred = np.zeros((n_samples, n_tasks))
for ind, task in enumerate(self.tasks):
task_model = self.model_builder(self.task_model_dirs[task])
task_model.reload()
y_pred[:, ind] = task_model.predict(dataset, [])
y_pred = undo_transforms(y_pred, transformers)
return y_pred
def predict_proba_on_batch(self, X, n_classes=2):
"""
Concatenates results from all singletask models.
"""
n_tasks = len(self.tasks)
n_samples = X.shape[0]
y_pred = np.zeros((n_samples, n_tasks, n_classes))
for ind, task in enumerate(self.tasks):
task_model = self.model_builder(self.task_model_dirs[task])
task_model.reload()
y_pred[:, ind] = task_model.predict_proba_on_batch(X)
return y_pred
def predict_proba(self, dataset, transformers=[], n_classes=2):
"""
Concatenates results from all singletask models.
"""
n_tasks = len(self.tasks)
n_samples = len(dataset)
y_pred = np.zeros((n_samples, n_tasks, n_classes))
for ind, task in enumerate(self.tasks):
task_model = self.model_builder(self.task_model_dirs[task])
task_model.reload()
y_pred[:, ind] = np.squeeze(task_model.predict_proba(
dataset, transformers, n_classes))
return y_pred
def save(self):
"""Save all models
TODO(rbharath): Saving is not yet supported for this model.
"""
pass
def reload(self):
"""Load all models"""
# Loading is done on-the-fly
pass
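# Hypothetical usage sketch (assumptions about deepchem's sklearn wrapper and
# its constructor signature; not part of the original module):
#
#   from sklearn.ensemble import RandomForestRegressor
#   from deepchem.models.sklearn_models import SklearnModel
#
#   def builder(model_dir):
#       return SklearnModel(RandomForestRegressor(), model_dir=model_dir)
#
#   st2mt = SingletaskToMultitask(tasks, builder, model_dir="/tmp/st2mt")
#   st2mt.fit(train_dataset)
#   y_pred = st2mt.predict(test_dataset, transformers)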
| mit |
Garrett-R/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
clemkoa/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 59 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison by
results using two different component analysis techniques.
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
# #############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
# #############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
davidbroadwater/nyc-subway-datascience-project | project_4/exercise_1/data_visualization.py | 1 | 3560 | from pandas import *
from ggplot import *
import pandas as pd
from datetime import datetime
def plot_weather_data(turnstile_weather):
'''
You are passed in a dataframe called turnstile_weather.
Use turnstile_weather along with ggplot to make a data visualization
focused on the MTA and weather data we used in assignment #3.
You should feel free to implement something that we discussed in class
(e.g., scatterplots, line plots, or histograms) or attempt to implement
something more advanced if you'd like.
Here are some suggestions for things to investigate and illustrate:
* Ridership by time of day or day of week
* How ridership varies based on Subway station
* Which stations have more exits or entries at different times of day
If you'd like to learn more about ggplot and its capabilities, take
a look at the documentation at:
https://pypi.python.org/pypi/ggplot/
You can check out:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
To see all the columns and data points included in the turnstile_weather
dataframe.
However, due to the limitation of our Amazon EC2 server, we are giving you about 1/3
of the actual data in the turnstile_weather dataframe
'''
'''
Things I need to convert/create:
-
* Ridership by time of day on weekday vs weekend
Group EXITSn_hourly into 3-hour bins
* Ridership by day of week versus temperature (above/below average) and rain
Convert dates to days of the week
Group by greater than average (global average for simplicity?) temperature or by rain
Sum EXITSn_hourly into days of the week
'''
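    # Hedged sketch of the 3-hour binning mentioned in the notes above (not
    # wired into the plots below; 'Hour' and 'EXITSn_hourly' are the turnstile
    # columns already used further down):
    #
    #     turnstile_weather['hour_bin'] = (turnstile_weather['Hour'] // 3) * 3
    #     ridership_by_bin = turnstile_weather.groupby('hour_bin')['EXITSn_hourly'].sum()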
def reformat_subway_dates(date):
date_formatted = int(datetime.strftime(datetime.strptime(date, "%Y-%m-%d"),'%w'))
return date_formatted
set_option('chained_assignment', None)
turnstile_weather['day_of_week'] = turnstile_weather['DATEn'].map(reformat_subway_dates)
daily_averages = turnstile_weather.groupby(['day_of_week','rain'])['ENTRIESn_hourly'].mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe?rq=1
daily_averages = daily_averages.reset_index()#
plot = ggplot(aes(x='day_of_week',y='ENTRIESn_hourly', color='rain'),data=daily_averages) + \
geom_point() + \
geom_line() + \
ggtitle('Average NYC Subway Ridership by Day and Rain (Blue) vs No Rain (Red)') + \
xlab('Day of the Week') + \
ylab('Average Number of Riders') +\
xlim(-0.1, 6.1) +\
scale_x_continuous( labels=("","Sun","Mon", "Tue", "Wed", "Thu", "Fri", "Sat"))
#daygrouping = turnstile_weather.groupby(['day_of_week'])
#daygrouping_entries = daygrouping ['ENTRIESn_hourly']
#daily_averages = daygrouping_entries.mean()
plot = ggplot(turnstile_weather, aes('day_of_week','EXITSn_hourly')) + geom_bar(stat = "bar")+ \
ggtitle('NYC Subway Ridership by Hour') + xlab('Hour') + ylab('Number of Riders')
plot = ggplot(turnstile_weather, aes('Hour','EXITSn_hourly')) + geom_bar(stat = "bar")+ \
ggtitle('NYC Subway Ridership by Hour') + xlab('Hour') + ylab('Number of Riders')
return plot
if __name__ == "__main__":
    # input_filename was never defined in the original script; the CSV name
    # below comes from the dataset URL referenced in plot_weather_data's docstring.
    input_filename = "turnstile_data_master_with_weather.csv"
    image = "plot.png"
with open(image, "wb") as f:
turnstile_weather = pd.read_csv(input_filename)
turnstile_weather['datetime'] = turnstile_weather['DATEn'] + ' ' + turnstile_weather['TIMEn']
gg = plot_weather_data(turnstile_weather)
ggsave(f, gg)
| mit |
Mirantis/disk_perf_test_tool | scripts/postprocessing/bottleneck.py | 1 | 17445 | """ Analyze test results for finding bottlenecks """
import re
import sys
import csv
import time
import bisect
import os.path
import argparse
import collections
import yaml
import texttable
try:
import pygraphviz as pgv
except ImportError:
pgv = None
sys.path.append("/mnt/other/work/disk_perf_test_tool")
from wally.run_test import load_data_from
from wally.utils import b2ssize, b2ssize_10
class SensorInfo(object):
def __init__(self, name, print_name, native_ext, to_bytes_coef):
self.name = name
self.print_name = print_name
self.native_ext = native_ext
self.to_bytes_coef = to_bytes_coef
_SINFO = [
SensorInfo('recv_bytes', 'net_recv', 'B', 1),
SensorInfo('send_bytes', 'net_send', 'B', 1),
SensorInfo('sectors_written', 'hdd_write', 'Sect', 512),
SensorInfo('sectors_read', 'hdd_read', 'Sect', 512),
SensorInfo('reads_completed', 'read_op', 'OP', None),
SensorInfo('writes_completed', 'write_op', 'OP', None),
SensorInfo('procs_blocked', 'blocked_procs', 'P', None),
]
SINFO_MAP = dict((sinfo.name, sinfo) for sinfo in _SINFO)
to_bytes = dict((sinfo.name, sinfo.to_bytes_coef)
for sinfo in _SINFO
if sinfo.to_bytes_coef is not None)
class NodeSensorsData(object):
def __init__(self, source_id, hostname, headers, values):
self.source_id = source_id
self.hostname = hostname
self.headers = headers
self.values = values
self.times = None
def finalize(self):
self.times = [v[0] for v in self.values]
def get_data_for_interval(self, beg, end):
p1 = bisect.bisect_left(self.times, beg)
p2 = bisect.bisect_right(self.times, end)
obj = self.__class__(self.source_id,
self.hostname,
self.headers,
self.values[p1:p2])
obj.times = self.times[p1:p2]
return obj
def __getitem__(self, name):
idx = self.headers.index(name.split('.'))
        # headers[0] is the time column, so this index already lines up with each row of values
return [val[idx] for val in self.values]
def load_results_csv(fd):
data = fd.read()
results = {}
for block in data.split("NEW_DATA"):
block = block.strip()
if len(block) == 0:
continue
it = csv.reader(block.split("\n"))
headers = next(it)
sens_data = [map(float, vals) for vals in it]
source_id, hostname = headers[:2]
headers = [(None, 'time')] + \
[header.split('.') for header in headers[2:]]
assert set(map(len, headers)) == set([2])
results[source_id] = NodeSensorsData(source_id, hostname,
headers, sens_data)
return results
def load_test_timings(fname, max_diff=1000):
raw_map = collections.defaultdict(lambda: [])
class data(object):
pass
load_data_from(fname)(None, data)
for test_type, test_results in data.results.items():
if test_type == 'io':
for tests_res in test_results:
raw_map[tests_res.config.name].append(tests_res.run_interval)
result = {}
for name, intervals in raw_map.items():
intervals.sort()
curr_start, curr_stop = intervals[0]
curr_result = []
for (start, stop) in intervals[1:]:
if abs(curr_start - start) < max_diff:
# if abs(curr_stop - stop) > 2:
# print abs(curr_stop - stop)
assert abs(curr_stop - stop) < max_diff
else:
assert start + max_diff >= curr_stop
assert stop > curr_stop
curr_result.append((curr_start, curr_stop))
curr_start, curr_stop = start, stop
curr_result.append((curr_start, curr_stop))
merged_res = []
curr_start, curr_stop = curr_result[0]
for start, stop in curr_result[1:]:
if abs(curr_stop - start) < max_diff:
curr_stop = stop
else:
merged_res.append((curr_start, curr_stop))
curr_start, curr_stop = start, stop
merged_res.append((curr_start, curr_stop))
result[name] = merged_res
return result
critical_values = dict(
io_queue=1,
usage_percent=0.8,
procs_blocked=1,
procs_queue=1)
class AggregatedData(object):
def __init__(self, sensor_name):
self.sensor_name = sensor_name
# (node, device): count
self.per_device = collections.defaultdict(lambda: 0)
# node: count
self.per_node = collections.defaultdict(lambda: 0)
# role: count
self.per_role = collections.defaultdict(lambda: 0)
# (role_or_node, device_or_*): count
self.all_together = collections.defaultdict(lambda: 0)
def __str__(self):
res = "<AggregatedData({0})>\n".format(self.sensor_name)
for (role_or_node, device), val in self.all_together.items():
res += " {0}:{1} = {2}\n".format(role_or_node, device, val)
return res
def total_consumption(sensors_data, roles_map):
result = {}
for name, sensor_data in sensors_data.items():
for pos, (dev, sensor) in enumerate(sensor_data.headers):
if 'time' == sensor:
continue
try:
ad = result[sensor]
except KeyError:
ad = result[sensor] = AggregatedData(sensor)
val = sum(vals[pos] for vals in sensor_data.values)
ad.per_device[(sensor_data.hostname, dev)] += val
# vals1 = sensors_data['localhost:22']['sdc.sectors_read']
# vals2 = sensors_data['localhost:22']['sdb.sectors_written']
# from matplotlib import pyplot as plt
# plt.plot(range(len(vals1)), vals1)
# plt.plot(range(len(vals2)), vals2)
# plt.show()
# exit(1)
for ad in result.values():
for (hostname, dev), val in ad.per_device.items():
ad.per_node[hostname] += val
for role in roles_map[hostname]:
ad.per_role[role] += val
ad.all_together[(hostname, dev)] = val
for role, val in ad.per_role.items():
ad.all_together[(role, '*')] = val
for node, val in ad.per_node.items():
ad.all_together[(node, '*')] = val
return result
def avg_load(sensors_data):
load = collections.defaultdict(lambda: 0)
min_time = 0xFFFFFFFFFFF
max_time = 0
for sensor_data in sensors_data.values():
min_time = min(min_time, min(sensor_data.times))
max_time = max(max_time, max(sensor_data.times))
for name, max_val in critical_values.items():
for pos, (dev, sensor) in enumerate(sensor_data.headers):
if sensor == name:
for vals in sensor_data.values:
if vals[pos] > max_val:
load[(sensor_data.hostname, dev, sensor)] += 1
return load, max_time - min_time
def print_bottlenecks(sensors_data, max_bottlenecks=15):
load, duration = avg_load(sensors_data)
if not load:
return "\n*** No bottlenecks found *** \n"
rev_items = ((v, k) for (k, v) in load.items())
res = sorted(rev_items, reverse=True)[:max_bottlenecks]
max_name_sz = max(len(name) for _, name in res)
frmt = "{{0:>{0}}} | {{1:>4}}".format(max_name_sz)
table = [frmt.format("Component", "% times load > 100%")]
for (v, k) in res:
table.append(frmt.format(k, int(v * 100.0 / duration + 0.5)))
return "\n".join(table)
def print_consumption(agg, min_transfer=None):
rev_items = []
for (node_or_role, dev), v in agg.all_together.items():
rev_items.append((int(v), node_or_role + ':' + dev))
res = sorted(rev_items, reverse=True)
if min_transfer is not None:
res = [(v, k)
for (v, k) in res
if v >= min_transfer]
if len(res) == 0:
return None
res = [(b2ssize(v) + "B", k) for (v, k) in res]
max_name_sz = max(len(name) for _, name in res)
max_val_sz = max(len(val) for val, _ in res)
frmt = " {{0:>{0}}} | {{1:>{1}}} ".format(max_name_sz, max_val_sz)
table = [frmt.format("Component", "Usage")]
for (v, k) in res:
table.append(frmt.format(k, v))
return "\n".join(table)
def make_roles_mapping(source_id_mapping, source_id2hostname):
result = {}
for ssh_url, roles in source_id_mapping.items():
if '@' in ssh_url:
source_id = ssh_url.split('@')[1]
else:
source_id = ssh_url.split('://')[1]
if source_id.count(':') == 2:
source_id = source_id.rsplit(":", 1)[0]
if source_id.endswith(':'):
source_id += "22"
if source_id in source_id2hostname:
result[source_id] = roles
result[source_id2hostname[source_id]] = roles
for testnode_src in (set(source_id2hostname) - set(result)):
result[testnode_src] = ['testnode']
result[source_id2hostname[testnode_src]] = ['testnode']
return result
def get_testdata_size(consumption):
max_data = 0
for name, sens in SINFO_MAP.items():
if sens.to_bytes_coef is not None:
agg = consumption.get(name)
if agg is not None:
cdt = agg.per_role.get('testnode', 0) * sens.to_bytes_coef
max_data = max(max_data, cdt)
return max_data
def get_testop_cout(consumption):
max_op = 0
for name, sens in SINFO_MAP.items():
if sens.to_bytes_coef is None:
agg = consumption.get(name)
if agg is not None:
max_op = max(max_op, agg.per_role.get('testnode', 0))
return max_op
def get_data_for_intervals(data, intervals):
res = {}
for begin, end in intervals:
for name, node_data in data.items():
ndata = node_data.get_data_for_interval(begin, end)
res[name] = ndata
return res
class Host(object):
def __init__(self, name=None):
self.name = name
self.hdd_devs = {}
self.net_devs = None
def plot_consumption(per_consumer_table, fields, refload):
if pgv is None:
return
hosts = {}
storage_sensors = ('sectors_written', 'sectors_read')
for (hostname, dev), consumption in per_consumer_table.items():
if hostname not in hosts:
hosts[hostname] = Host(hostname)
host = hosts[hostname]
cons_map = dict(zip(fields, consumption))
for sn in storage_sensors:
vl = cons_map.get(sn, 0)
if vl > 0:
host.hdd_devs.setdefault(dev, {})[sn] = vl
p = pgv.AGraph(name='system', directed=True)
net = "Network"
p.add_node(net)
in_color = 'red'
out_color = 'green'
for host in hosts.values():
g = p.subgraph(name="cluster_" + host.name, label=host.name,
color="blue")
g.add_node(host.name, shape="diamond")
p.add_edge(host.name, net)
p.add_edge(net, host.name)
for dev_name, values in host.hdd_devs.items():
if dev_name == '*':
continue
to = values.get('sectors_written', 0)
frm = values.get('sectors_read', 0)
to_pw = 7 * to / refload
frm_pw = 7 * frm / refload
min_with = 0.1
if to_pw > min_with or frm_pw > min_with:
dev_fqn = host.name + "." + dev_name
g.add_node(dev_fqn)
if to_pw > min_with:
g.add_edge(host.name, dev_fqn,
label=b2ssize(to) + "B",
penwidth=to_pw,
fontcolor=out_color,
color=out_color)
if frm_pw > min_with:
g.add_edge(dev_fqn, host.name,
label=b2ssize(frm) + "B",
penwidth=frm_pw,
color=in_color,
fontcolor=in_color)
return p.string()
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--time_period', nargs=2,
type=int, default=None,
help="Begin and end time for tests")
    parser.add_argument('-m', '--max-bottleneck', type=int,
                        default=15, help="Max number of bottlenecks to show")
    parser.add_argument('-x', '--max-diff', type=int,
                        default=10, help="Hide consumers below this threshold, "
                        "given in units of 0.1% of the test nodes' total load")
parser.add_argument('-d', '--debug-ver', action='store_true',
help="Full report with original data")
parser.add_argument('-u', '--user-ver', action='store_true',
default=True, help="Avg load report")
parser.add_argument('-s', '--select-loads', nargs='*', default=[])
parser.add_argument('-f', '--fields', nargs='*', default=[])
parser.add_argument('results_folder')
return parser.parse_args(args[1:])
def main(argv):
opts = parse_args(argv)
stor_dir = os.path.join(opts.results_folder, 'sensor_storage')
data = {}
source_id2hostname = {}
csv_files = os.listdir(stor_dir)
for fname in csv_files:
        assert re.match(r"\d+_\d+\.csv$", fname)
csv_files.sort(key=lambda x: int(x.split('_')[0]))
for fname in csv_files:
with open(os.path.join(stor_dir, fname)) as fd:
for name, node_sens_data in load_results_csv(fd).items():
if name in data:
assert data[name].hostname == node_sens_data.hostname
assert data[name].source_id == node_sens_data.source_id
assert data[name].headers == node_sens_data.headers
data[name].values.extend(node_sens_data.values)
else:
data[name] = node_sens_data
for nd in data.values():
assert nd.source_id not in source_id2hostname
source_id2hostname[nd.source_id] = nd.hostname
nd.finalize()
roles_file = os.path.join(opts.results_folder,
'nodes.yaml')
src2roles = yaml.load(open(roles_file))
timings = load_test_timings(opts.results_folder)
roles_map = make_roles_mapping(src2roles, source_id2hostname)
max_diff = float(opts.max_diff) / 1000
fields = ('recv_bytes', 'send_bytes',
'sectors_read', 'sectors_written',
'reads_completed', 'writes_completed')
if opts.fields != []:
fields = [field for field in fields if field in opts.fields]
for test_name, intervals in sorted(timings.items()):
if opts.select_loads != []:
if test_name not in opts.select_loads:
continue
data_chunks = get_data_for_intervals(data, intervals)
consumption = total_consumption(data_chunks, roles_map)
bottlenecks = print_bottlenecks(data_chunks)
testdata_sz = get_testdata_size(consumption) * max_diff
testop_count = get_testop_cout(consumption) * max_diff
per_consumer_table = {}
per_consumer_table_str = {}
        all_consumers = set()  # union of (node-or-role, device) consumers across all sensors
for value in consumption.values():
all_consumers = all_consumers | set(value.all_together)
fields = [field for field in fields if field in consumption]
all_consumers_sum = []
for consumer in all_consumers:
tb_str = per_consumer_table_str[consumer] = []
tb = per_consumer_table[consumer] = []
vl = 0
for name in fields:
val = consumption[name].all_together[consumer]
if SINFO_MAP[name].to_bytes_coef is None:
if val < testop_count:
tb_str.append('0')
else:
tb_str.append(b2ssize_10(int(val)))
else:
val = int(val) * SINFO_MAP[name].to_bytes_coef
if val < testdata_sz:
tb_str.append('-')
else:
tb_str.append(b2ssize(val) + "B")
tb.append(int(val))
vl += int(val)
all_consumers_sum.append((vl, consumer))
all_consumers_sum.sort(reverse=True)
plot_consumption(per_consumer_table, fields,
testdata_sz / max_diff)
tt = texttable.Texttable(max_width=130)
tt.set_cols_align(["l"] + ["r"] * len(fields))
header = ["Name"]
for fld in fields:
if fld in SINFO_MAP:
header.append(SINFO_MAP[fld].print_name)
else:
header.append(fld)
tt.header(header)
for summ, consumer in all_consumers_sum:
if summ > 0:
tt.add_row([":".join(consumer)] +
per_consumer_table_str[consumer])
tt.set_deco(texttable.Texttable.VLINES | texttable.Texttable.HEADER)
res = tt.draw()
max_len = max(map(len, res.split("\n")))
print test_name.center(max_len)
print res
print bottlenecks
if __name__ == "__main__":
exit(main(sys.argv))
| apache-2.0 |
RachitKansal/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
JsNoNo/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
tencrance/cool-config | ml_keras_learn/tutorials/matplotlibTUT/plt19_animation.py | 3 | 1573 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 19 - animation
"""
Please note, this script is for Python 3+.
If you are using Python 2, please modify it accordingly.
Tutorial reference:
http://matplotlib.org/examples/animation/simple_anim.html
More animation example code:
http://matplotlib.org/examples/animation/
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
fig, ax = plt.subplots()
x = np.arange(0, 2*np.pi, 0.01)
line, = ax.plot(x, np.sin(x))
def animate(i):
line.set_ydata(np.sin(x + i/10.0)) # update the data
return line,
# Init only required for blitting to give a clean slate.
def init():
line.set_ydata(np.sin(x))
return line,
# call the animator. blit=True means only re-draw the parts that have changed.
# blit=True does not work on Mac, set blit=False
# interval: delay between frames in milliseconds (the update frequency)
ani = animation.FuncAnimation(fig=fig, func=animate, frames=100, init_func=init,
interval=20, blit=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
# anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show() | mit |
great-expectations/great_expectations | great_expectations/dataset/pandas_dataset.py | 1 | 67915 | import inspect
import json
import logging
import warnings
from datetime import datetime
from functools import wraps
from operator import ge, gt, le, lt
from typing import List
import jsonschema
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
_scipy_distribution_positional_args_from_dict,
is_valid_continuous_partition_object,
validate_distribution_parameters,
)
from .dataset import Dataset
logger = logging.getLogger(__name__)
class MetaPandasDataset(Dataset):
"""MetaPandasDataset is a thin layer between Dataset and PandasDataset.
This two-layer inheritance is required to make @classmethod decorators work.
Practically speaking, that means that MetaPandasDataset implements \
expectation decorators, like `column_map_expectation` and `column_aggregate_expectation`, \
and PandasDataset implements the expectation methods themselves.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def column_map_expectation(cls, func):
"""Constructs an expectation using column-map semantics.
The MetaPandasDataset implementation replaces the "column" parameter supplied by the user with a pandas Series
object containing the actual column from the relevant pandas dataframe. This simplifies the implementing expectation
logic while preserving the standard Dataset signature and expected behavior.
See :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>` \
for full documentation of this function.
"""
argspec = inspect.getfullargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
*args,
**kwargs,
):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
result_format = parse_result_format(result_format)
if row_condition and self._supports_row_condition:
data = self._apply_row_condition(
row_condition=row_condition, condition_parser=condition_parser
)
else:
data = self
series = data[column]
if func.__name__ in [
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_null",
]:
# Counting the number of unexpected values can be expensive when there is a large
# number of np.nan values.
# This only happens on expect_column_values_to_not_be_null expectations.
# Since there is no reason to look for most common unexpected values in this case,
# we will instruct the result formatting method to skip this step.
# FIXME rename to mapped_ignore_values?
boolean_mapped_null_values = np.full(series.shape, False)
result_format["partial_unexpected_count"] = 0
else:
boolean_mapped_null_values = series.isnull().values
element_count = int(len(series))
# FIXME rename nonnull to non_ignored?
nonnull_values = series[boolean_mapped_null_values == False]
nonnull_count = int((boolean_mapped_null_values == False).sum())
boolean_mapped_success_values = func(self, nonnull_values, *args, **kwargs)
success_count = np.count_nonzero(boolean_mapped_success_values)
unexpected_list = list(
nonnull_values[boolean_mapped_success_values == False]
)
unexpected_index_list = list(
nonnull_values[boolean_mapped_success_values == False].index
)
if "output_strftime_format" in kwargs:
output_strftime_format = kwargs["output_strftime_format"]
parsed_unexpected_list = []
for val in unexpected_list:
if val is None:
parsed_unexpected_list.append(val)
else:
if isinstance(val, str):
val = parse(val)
parsed_unexpected_list.append(
datetime.strftime(val, output_strftime_format)
)
unexpected_list = parsed_unexpected_list
success, percent_success = self._calc_map_expectation_success(
success_count, nonnull_count, mostly
)
return_obj = self._format_map_output(
result_format,
success,
element_count,
nonnull_count,
len(unexpected_list),
unexpected_list,
unexpected_index_list,
)
# FIXME Temp fix for result format
if func.__name__ in [
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_null",
]:
del return_obj["result"]["unexpected_percent_nonmissing"]
del return_obj["result"]["missing_count"]
del return_obj["result"]["missing_percent"]
try:
del return_obj["result"]["partial_unexpected_counts"]
del return_obj["result"]["partial_unexpected_list"]
except KeyError:
pass
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
@classmethod
def column_pair_map_expectation(cls, func):
"""
The column_pair_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
truthiness of some condition on a per row basis across a pair of columns.
"""
argspec = inspect.getfullargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(
self,
column_A,
column_B,
mostly=None,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
*args,
**kwargs,
):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
if row_condition:
self = self.query(row_condition).reset_index(drop=True)
series_A = self[column_A]
series_B = self[column_B]
if ignore_row_if == "both_values_are_missing":
boolean_mapped_null_values = series_A.isnull() & series_B.isnull()
elif ignore_row_if == "either_value_is_missing":
boolean_mapped_null_values = series_A.isnull() | series_B.isnull()
elif ignore_row_if == "never":
boolean_mapped_null_values = series_A.map(lambda x: False)
else:
                raise ValueError("Unknown value of ignore_row_if: %s" % (ignore_row_if,))
assert len(series_A) == len(
series_B
), "Series A and B must be the same length"
# This next bit only works if series_A and _B are the same length
element_count = int(len(series_A))
nonnull_count = (boolean_mapped_null_values == False).sum()
nonnull_values_A = series_A[boolean_mapped_null_values == False]
nonnull_values_B = series_B[boolean_mapped_null_values == False]
nonnull_values = [
value_pair
for value_pair in zip(list(nonnull_values_A), list(nonnull_values_B))
]
boolean_mapped_success_values = func(
self, nonnull_values_A, nonnull_values_B, *args, **kwargs
)
success_count = boolean_mapped_success_values.sum()
unexpected_list = [
value_pair
for value_pair in zip(
list(
series_A[
(boolean_mapped_success_values == False)
& (boolean_mapped_null_values == False)
]
),
list(
series_B[
(boolean_mapped_success_values == False)
& (boolean_mapped_null_values == False)
]
),
)
]
unexpected_index_list = list(
series_A[
(boolean_mapped_success_values == False)
& (boolean_mapped_null_values == False)
].index
)
success, percent_success = self._calc_map_expectation_success(
success_count, nonnull_count, mostly
)
return_obj = self._format_map_output(
result_format,
success,
element_count,
nonnull_count,
len(unexpected_list),
unexpected_list,
unexpected_index_list,
)
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
@classmethod
def multicolumn_map_expectation(cls, func):
"""
The multicolumn_map_expectation decorator handles boilerplate issues surrounding the common pattern of
evaluating truthiness of some condition on a per row basis across a set of columns.
"""
argspec = inspect.getfullargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(
self,
column_list,
mostly=None,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
*args,
**kwargs,
):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
if row_condition:
self = self.query(row_condition).reset_index(drop=True)
test_df = self[column_list]
if ignore_row_if == "all_values_are_missing":
boolean_mapped_skip_values = test_df.isnull().all(axis=1)
elif ignore_row_if == "any_value_is_missing":
boolean_mapped_skip_values = test_df.isnull().any(axis=1)
elif ignore_row_if == "never":
boolean_mapped_skip_values = pd.Series([False] * len(test_df))
else:
                raise ValueError("Unknown value of ignore_row_if: %s" % (ignore_row_if,))
boolean_mapped_success_values = func(
self, test_df[boolean_mapped_skip_values == False], *args, **kwargs
)
success_count = boolean_mapped_success_values.sum()
nonnull_count = (~boolean_mapped_skip_values).sum()
element_count = len(test_df)
unexpected_list = test_df[
(boolean_mapped_skip_values == False)
& (boolean_mapped_success_values == False)
]
unexpected_index_list = list(unexpected_list.index)
success, percent_success = self._calc_map_expectation_success(
success_count, nonnull_count, mostly
)
return_obj = self._format_map_output(
result_format,
success,
element_count,
nonnull_count,
len(unexpected_list),
unexpected_list.to_dict(orient="records"),
unexpected_index_list,
)
return return_obj
inner_wrapper.__name__ = func.__name__
inner_wrapper.__doc__ = func.__doc__
return inner_wrapper
class PandasDataset(MetaPandasDataset, pd.DataFrame):
"""
PandasDataset instantiates the great_expectations Expectations API as a subclass of a pandas.DataFrame.
For the full API reference, please see :func:`Dataset <great_expectations.data_asset.dataset.Dataset>`
Notes:
        1. Samples and Subsets of PandasDataset have ALL the expectations of the original \
data frame unless the user specifies the ``discard_subset_failing_expectations = True`` \
property on the original data frame.
        2. Concatenations, joins, and merges of PandasDatasets contain NO expectations (since no autoinspection
is performed by default).
--ge-feature-maturity-info--
id: validation_engine_pandas
title: Validation Engine - Pandas
icon:
short_description: Use Pandas DataFrame to validate data
description: Use Pandas DataFrame to validate data
how_to_guide_url:
maturity: Production
maturity_details:
api_stability: Stable
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: N/A -> see relevant Datasource evaluation
documentation_completeness: Complete
bug_risk: Low
expectation_completeness: Complete
--ge-feature-maturity-info--
"""
# this is necessary to subclass pandas in a proper way.
# NOTE: specifying added properties in this way means that they will NOT be carried over when
# the dataframe is manipulated, which we might want. To specify properties that are carried over
# to manipulation results, we would just use `_metadata = ['row_count', ...]` here. The most likely
# case is that we want the former, but also want to re-initialize these values to None so we don't
# get an attribute error when trying to access them (I think this could be done in __finalize__?)
_internal_names = pd.DataFrame._internal_names + [
"_batch_kwargs",
"_batch_markers",
"_batch_parameters",
"_batch_id",
"_expectation_suite",
"_config",
"caching",
"default_expectation_args",
"discard_subset_failing_expectations",
]
_internal_names_set = set(_internal_names)
_supports_row_condition = True
# We may want to expand or alter support for subclassing dataframes in the future:
# See http://pandas.pydata.org/pandas-docs/stable/extending.html#extending-subclassing-pandas
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.discard_subset_failing_expectations = kwargs.get(
"discard_subset_failing_expectations", False
)
@property
def _constructor(self):
return self.__class__
def __finalize__(self, other, method=None, **kwargs):
if isinstance(other, PandasDataset):
self._initialize_expectations(other._expectation_suite)
# If other was coerced to be a PandasDataset (e.g. via _constructor call during self.copy() operation)
# then it may not have discard_subset_failing_expectations set. Default to self value
self.discard_subset_failing_expectations = getattr(
other,
"discard_subset_failing_expectations",
self.discard_subset_failing_expectations,
)
if self.discard_subset_failing_expectations:
self.discard_failing_expectations()
super().__finalize__(other, method, **kwargs)
return self
def _apply_row_condition(self, row_condition, condition_parser):
if condition_parser not in ["python", "pandas"]:
raise ValueError(
"condition_parser is required when setting a row_condition,"
" and must be 'python' or 'pandas'"
)
else:
return self.query(row_condition, parser=condition_parser).reset_index(
drop=True
)
def get_row_count(self):
return self.shape[0]
def get_column_count(self):
return self.shape[1]
def get_table_columns(self) -> List[str]:
return list(self.columns)
def get_column_sum(self, column):
return self[column].sum()
def get_column_max(self, column, parse_strings_as_datetimes=False):
temp_column = self[column].dropna()
if parse_strings_as_datetimes:
temp_column = temp_column.map(parse)
return temp_column.max()
def get_column_min(self, column, parse_strings_as_datetimes=False):
temp_column = self[column].dropna()
if parse_strings_as_datetimes:
temp_column = temp_column.map(parse)
return temp_column.min()
def get_column_mean(self, column):
return self[column].mean()
def get_column_nonnull_count(self, column):
series = self[column]
null_indexes = series.isnull()
nonnull_values = series[null_indexes == False]
return len(nonnull_values)
def get_column_value_counts(self, column, sort="value", collate=None):
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
if collate is not None:
raise ValueError("collate parameter is not supported in PandasDataset")
counts = self[column].value_counts()
if sort == "value":
try:
counts.sort_index(inplace=True)
except TypeError:
# Having values of multiple types in a object dtype column (e.g., strings and floats)
# raises a TypeError when the sorting method performs comparisons.
if self[column].dtype == object:
counts.index = counts.index.astype(str)
counts.sort_index(inplace=True)
elif sort == "counts":
counts.sort_values(inplace=True)
counts.name = "count"
counts.index.name = "value"
return counts
def get_column_unique_count(self, column):
return self.get_column_value_counts(column).shape[0]
def get_column_modes(self, column):
return list(self[column].mode().values)
def get_column_median(self, column):
return self[column].median()
def get_column_quantiles(self, column, quantiles, allow_relative_error=False):
interpolation_options = ("linear", "lower", "higher", "midpoint", "nearest")
if not allow_relative_error:
allow_relative_error = "nearest"
if allow_relative_error not in interpolation_options:
raise ValueError(
f"If specified for pandas, allow_relative_error must be one an allowed value for the 'interpolation'"
f"parameter of .quantile() (one of {interpolation_options})"
)
return (
self[column]
.quantile(quantiles, interpolation=allow_relative_error)
.tolist()
)
def get_column_stdev(self, column):
return self[column].std()
def get_column_hist(self, column, bins):
hist, bin_edges = np.histogram(self[column], bins, density=False)
return list(hist)
def get_column_count_in_range(
self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
# TODO this logic could probably go in the non-underscore version if we want to cache
if min_val is None and max_val is None:
raise ValueError("Must specify either min or max value")
if min_val is not None and max_val is not None and min_val > max_val:
raise ValueError("Min value must be <= to max value")
result = self[column]
if min_val is not None:
if strict_min:
result = result[result > min_val]
else:
result = result[result >= min_val]
if max_val is not None:
if strict_max:
result = result[result < max_val]
else:
result = result[result <= max_val]
return len(result)
def get_crosstab(
self,
column_A,
column_B,
bins_A=None,
bins_B=None,
n_bins_A=None,
n_bins_B=None,
):
"""Get crosstab of column_A and column_B, binning values if necessary"""
series_A = self.get_binned_values(self[column_A], bins_A, n_bins_A)
series_B = self.get_binned_values(self[column_B], bins_B, n_bins_B)
return pd.crosstab(series_A, columns=series_B)
def get_binned_values(self, series, bins, n_bins):
"""
Get binned values of series.
Args:
            series (pd.Series): Input series
            bins (list):
                Bin edges for the series: a list of numbers if the series is numeric,
                otherwise a list of lists of series values.
            n_bins (int): Number of bins. Ignored if bins is not None.
"""
if n_bins is None:
n_bins = 10
if series.dtype in ["int", "float"]:
if bins is not None:
bins = sorted(np.unique(bins))
if np.min(series) < bins[0]:
bins = [np.min(series)] + bins
if np.max(series) > bins[-1]:
bins = bins + [np.max(series)]
if bins is None:
bins = np.histogram_bin_edges(series[series.notnull()], bins=n_bins)
# Make sure max of series is included in rightmost bin
bins[-1] = np.nextafter(bins[-1], bins[-1] + 1)
# Create labels for returned series
# Used in e.g. crosstab that is printed as observed value in data docs.
precision = int(np.log10(min(bins[1:] - bins[:-1]))) + 2
labels = [
f"[{round(lower, precision)}, {round(upper, precision)})"
for lower, upper in zip(bins[:-1], bins[1:])
]
if any(np.isnan(series)):
# Missings get digitized into bin = n_bins+1
labels += ["(missing)"]
return pd.Categorical.from_codes(
codes=np.digitize(series, bins=bins) - 1,
categories=labels,
ordered=True,
)
else:
if bins is None:
value_counts = series.value_counts(sort=True)
if len(value_counts) < n_bins + 1:
return series.fillna("(missing)")
else:
other_values = sorted(value_counts.index[n_bins:])
replace = {value: "(other)" for value in other_values}
else:
replace = dict()
for x in bins:
replace.update({value: ", ".join(x) for value in x})
return (
series.replace(to_replace=replace)
.fillna("(missing)")
.astype("category")
)
### Expectation methods ###
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_unique(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return ~column.duplicated(keep=False)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_null(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
include_nulls=True,
):
return ~column.isnull()
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_null(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column.isnull()
@DocInherit
def expect_column_values_to_be_of_type(
self,
column,
type_,
**kwargs
# Since we've now received the default arguments *before* the expectation decorator, we need to
# ensure we only pass what we actually received. Hence, we'll use kwargs
# mostly=None,
# result_format=None,
# row_condition=None, condition_parser=None, include_config=None, catch_exceptions=None, meta=None
):
"""
        The pandas implementation of this expectation takes the kwargs mostly, result_format, include_config,
        catch_exceptions, and meta like other expectations; however, it declares **kwargs because it needs to
        be able to fork into either aggregate or map semantics depending on the column type (see below).
In Pandas, columns *may* be typed, or they may be of the generic "object" type which can include rows with
different storage types in the same column.
To respect that implementation, the expect_column_values_to_be_of_type expectations will first attempt to
use the column dtype information to determine whether the column is restricted to the provided type. If that
is possible, then expect_column_values_to_be_of_type will return aggregate information including an
observed_value, similarly to other backends.
If it is not possible (because the column dtype is "object" but a more specific type was specified), then
PandasDataset will use column map semantics: it will return map expectation results and
check each value individually, which can be substantially slower.
Unfortunately, the "object" type is also used to contain any string-type columns (including 'str' and
numpy 'string_' (bytes)); consequently, it is not possible to test for string columns using aggregate semantics.
"""
# Short-circuit if the dtype tells us; in that case use column-aggregate (vs map) semantics
if (
self[column].dtype != "object"
or type_ is None
or type_ in ["object", "object_", "O"]
):
res = self._expect_column_values_to_be_of_type__aggregate(
column, type_, **kwargs
)
# Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
# existing expectation, but it should be definitely kept in sync
# We do not need this bookkeeping if we are in an active validation:
if self._active_validation:
return res
# First, if there is an existing expectation of this type, delete it. Then change the one we created to be
# of the proper expectation_type
existing_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_of_type",
kwargs={"column": column},
)
)
if len(existing_expectations) == 1:
self._expectation_suite.expectations.pop(existing_expectations[0])
# Now, rename the expectation we just added
new_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="_expect_column_values_to_be_of_type__aggregate",
kwargs={"column": column},
)
)
assert len(new_expectations) == 1
old_config = self._expectation_suite.expectations[new_expectations[0]]
new_config = ExpectationConfiguration(
expectation_type="expect_column_values_to_be_of_type",
kwargs=old_config.kwargs,
meta=old_config.meta,
success_on_last_run=old_config.success_on_last_run,
)
self._expectation_suite.expectations[new_expectations[0]] = new_config
else:
res = self._expect_column_values_to_be_of_type__map(column, type_, **kwargs)
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but the two should definitely be kept in sync
# We do not need this bookkeeping if we are in an active validation:
if self._active_validation:
return res
# First, if there is an existing expectation of this type, delete it. Then change the one we created to be
# of the proper expectation_type
existing_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_of_type",
kwargs={"column": column},
)
)
if len(existing_expectations) == 1:
self._expectation_suite.expectations.pop(existing_expectations[0])
# Now, rename the expectation we just added
new_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="_expect_column_values_to_be_of_type__map",
kwargs={"column": column},
)
)
assert len(new_expectations) == 1
old_config = self._expectation_suite.expectations[new_expectations[0]]
new_config = ExpectationConfiguration(
expectation_type="expect_column_values_to_be_of_type",
kwargs=old_config.kwargs,
meta=old_config.meta,
success_on_last_run=old_config.success_on_last_run,
)
self._expectation_suite.expectations[new_expectations[0]] = new_config
return res
@DataAsset.expectation(["column", "type_", "mostly"])
def _expect_column_values_to_be_of_type__aggregate(
self,
column,
type_,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if mostly is not None:
raise ValueError(
"PandasDataset cannot support mostly for a column with a non-object dtype."
)
if type_ is None:
success = True
else:
comp_types = []
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = self._native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
success = self[column].dtype.type in comp_types
return {
"success": success,
"result": {"observed_value": self[column].dtype.type.__name__},
}
@staticmethod
def _native_type_type_map(type_):
# We allow native python types in cases where the underlying type is "object":
if type_.lower() == "none":
return (type(None),)
elif type_.lower() == "bool":
return (bool,)
elif type_.lower() in ["int", "long"]:
return (int,)
elif type_.lower() == "float":
return (float,)
elif type_.lower() == "bytes":
return (bytes,)
elif type_.lower() == "complex":
return (complex,)
elif type_.lower() in ["str", "string_types"]:
return (str,)
elif type_.lower() == "list":
return (list,)
elif type_.lower() == "dict":
return (dict,)
elif type_.lower() == "unicode":
return None
@MetaPandasDataset.column_map_expectation
def _expect_column_values_to_be_of_type__map(
self,
column,
type_,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
comp_types = []
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = self._native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
if len(comp_types) < 1:
raise ValueError("Unrecognized numpy/python type: %s" % type_)
return column.map(lambda x: isinstance(x, tuple(comp_types)))
@DocInherit
def expect_column_values_to_be_in_type_list(
self,
column,
type_list,
**kwargs
# Since we've now received the default arguments *before* the expectation decorator, we need to
# ensure we only pass what we actually received. Hence, we'll use kwargs
# mostly=None,
# result_format = None,
# row_condition=None, condition_parser=None, include_config=None, catch_exceptions=None, meta=None
):
"""
        The pandas implementation of this expectation takes the kwargs mostly, result_format, include_config,
        catch_exceptions, and meta like other expectations; however, it declares **kwargs because it needs to
be able to fork into either aggregate or map semantics depending on the column type (see below).
In Pandas, columns *may* be typed, or they may be of the generic "object" type which can include rows with
different storage types in the same column.
        To respect that implementation, the expect_column_values_to_be_in_type_list expectation will first attempt to
        use the column dtype information to determine whether the column is restricted to the provided types. If that
        is possible, then expect_column_values_to_be_in_type_list will return aggregate information including an
observed_value, similarly to other backends.
If it is not possible (because the column dtype is "object" but a more specific type was specified), then
PandasDataset will use column map semantics: it will return map expectation results and
check each value individually, which can be substantially slower.
Unfortunately, the "object" type is also used to contain any string-type columns (including 'str' and
numpy 'string_' (bytes)); consequently, it is not possible to test for string columns using aggregate semantics.
"""
# Short-circuit if the dtype tells us; in that case use column-aggregate (vs map) semantics
if self[column].dtype != "object" or type_list is None:
res = self._expect_column_values_to_be_in_type_list__aggregate(
column, type_list, **kwargs
)
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but the two should definitely be kept in sync
# We do not need this bookkeeping if we are in an active validation:
if self._active_validation:
return res
# First, if there is an existing expectation of this type, delete it. Then change the one we created to be
# of the proper expectation_type
existing_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_type_list",
kwargs={"column": column},
)
)
if len(existing_expectations) == 1:
self._expectation_suite.expectations.pop(existing_expectations[0])
new_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="_expect_column_values_to_be_in_type_list__aggregate",
kwargs={"column": column},
)
)
assert len(new_expectations) == 1
old_config = self._expectation_suite.expectations[new_expectations[0]]
new_config = ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_type_list",
kwargs=old_config.kwargs,
meta=old_config.meta,
success_on_last_run=old_config.success_on_last_run,
)
self._expectation_suite.expectations[new_expectations[0]] = new_config
else:
res = self._expect_column_values_to_be_in_type_list__map(
column, type_list, **kwargs
)
            # Note: this logic is similar to the logic in _append_expectation for deciding when to overwrite an
            # existing expectation, but the two should definitely be kept in sync
# We do not need this bookkeeping if we are in an active validation:
if self._active_validation:
return res
# First, if there is an existing expectation of this type, delete it. Then change the one we created to be
# of the proper expectation_type
existing_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_type_list",
kwargs={"column": column},
)
)
if len(existing_expectations) == 1:
self._expectation_suite.expectations.pop(existing_expectations[0])
# Now, rename the expectation we just added
new_expectations = self._expectation_suite.find_expectation_indexes(
ExpectationConfiguration(
expectation_type="_expect_column_values_to_be_in_type_list__map",
kwargs={"column": column},
)
)
assert len(new_expectations) == 1
old_config = self._expectation_suite.expectations[new_expectations[0]]
new_config = ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_type_list",
kwargs=old_config.kwargs,
meta=old_config.meta,
success_on_last_run=old_config.success_on_last_run,
)
self._expectation_suite.expectations[new_expectations[0]] = new_config
return res
@MetaPandasDataset.expectation(["column", "type_list", "mostly"])
def _expect_column_values_to_be_in_type_list__aggregate(
self,
column,
type_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if mostly is not None:
raise ValueError(
"PandasDataset cannot support mostly for a column with a non-object dtype."
)
if type_list is None:
success = True
else:
comp_types = []
for type_ in type_list:
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = self._native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
success = self[column].dtype.type in comp_types
return {
"success": success,
"result": {"observed_value": self[column].dtype.type.__name__},
}
@MetaPandasDataset.column_map_expectation
def _expect_column_values_to_be_in_type_list__map(
self,
column,
type_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
comp_types = []
for type_ in type_list:
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = self._native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
if len(comp_types) < 1:
raise ValueError("No recognized numpy/python type in list: %s" % type_list)
return column.map(lambda x: isinstance(x, tuple(comp_types)))
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if value_set is None:
# Vacuously true
return np.ones(len(column), dtype=np.bool_)
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return column.isin(parsed_value_set)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return ~column.isin(parsed_value_set)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
parse_strings_as_datetimes=None,
output_strftime_format=None,
allow_cross_type_comparisons=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if parse_strings_as_datetimes:
# tolerance = timedelta(days=tolerance)
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
try:
temp_column = column.map(parse)
except TypeError:
temp_column = column
else:
temp_column = column
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
def comparator_factory(comparator, comparison_value):
def new_comparator(value):
return comparator(value, comparison_value)
def always_true(value):
return True
return always_true if comparison_value is None else new_comparator
min_comparator = comparator_factory(gt if strict_min else ge, min_value)
max_comparator = comparator_factory(lt if strict_max else le, max_value)
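        # Each comparator is either a bound check against its limit or a vacuous always-true
        # function when that limit is None, so the element-wise check below covers one-sided
        # and two-sided ranges alike.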
def cross_type_comparator(val):
try:
return min_comparator(val) & max_comparator(val)
except TypeError:
return False
try:
return min_comparator(temp_column) & max_comparator(temp_column)
except TypeError:
if allow_cross_type_comparisons:
return pd.Series(cross_type_comparator(val) for val in temp_column)
raise TypeError(
"Column values, min_value, and max_value must either be None or of the same type."
)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_increasing(
self,
column,
strictly=None,
parse_strings_as_datetimes=None,
output_strftime_format=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
temp_column = column.map(parse)
col_diff = temp_column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[0] = pd.Timedelta(1)
if strictly:
return col_diff > pd.Timedelta(0)
else:
return col_diff >= pd.Timedelta(0)
else:
col_diff = column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[col_diff.isnull()] = 1
if strictly:
return col_diff > 0
else:
return col_diff >= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_decreasing(
self,
column,
strictly=None,
parse_strings_as_datetimes=None,
output_strftime_format=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
temp_column = column.map(parse)
col_diff = temp_column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[0] = pd.Timedelta(-1)
if strictly:
return col_diff < pd.Timedelta(0)
else:
return col_diff <= pd.Timedelta(0)
else:
col_diff = column.diff()
# The first element is null, so it gets a bye and is always treated as True
col_diff[col_diff.isnull()] = -1
if strictly:
return col_diff < 0
else:
return col_diff <= 0
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_be_between(
self,
column,
min_value=None,
max_value=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
# Assert that min_value and max_value are integers
try:
if min_value is not None and not float(min_value).is_integer():
raise ValueError("min_value and max_value must be integers")
if max_value is not None and not float(max_value).is_integer():
raise ValueError("min_value and max_value must be integers")
except ValueError:
raise ValueError("min_value and max_value must be integers")
column_lengths = column.astype(str).str.len()
if min_value is not None and max_value is not None:
return column_lengths.between(min_value, max_value)
elif min_value is None and max_value is not None:
return column_lengths <= max_value
elif min_value is not None and max_value is None:
return column_lengths >= min_value
else:
return False
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_value_lengths_to_equal(
self,
column,
value,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column.str.len() == value
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column.astype(str).str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return ~column.astype(str).str.contains(regex)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_regex_list(
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
if match_on == "any":
return regex_match_df.any(axis="columns")
elif match_on == "all":
return regex_match_df.all(axis="columns")
else:
raise ValueError("match_on must be either 'any' or 'all'")
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list(
self,
column,
regex_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_matches = []
for regex in regex_list:
regex_matches.append(column.astype(str).str.contains(regex))
regex_match_df = pd.concat(regex_matches, axis=1, ignore_index=True)
return ~regex_match_df.any(axis="columns")
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_strftime_format(
self,
column,
strftime_format,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# Below is a simple validation that the provided format can both format and parse a datetime object.
        # %D is one example of a directive that can be used to format a datetime but not to parse one
try:
datetime.strptime(
datetime.strftime(datetime.now(), strftime_format), strftime_format
)
except ValueError as e:
raise ValueError("Unable to use provided strftime_format. " + str(e))
def is_parseable_by_format(val):
try:
datetime.strptime(val, strftime_format)
return True
except TypeError:
raise TypeError(
"Values passed to expect_column_values_to_match_strftime_format must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
)
except ValueError:
return False
return column.map(is_parseable_by_format)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_dateutil_parseable(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
def is_parseable(val):
try:
if type(val) != str:
raise TypeError(
"Values passed to expect_column_values_to_be_dateutil_parseable must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
)
parse(val)
return True
except (ValueError, OverflowError):
return False
return column.map(is_parseable)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_json_parseable(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
def is_json(val):
try:
json.loads(val)
return True
except:
return False
return column.map(is_json)
@DocInherit
@MetaPandasDataset.column_map_expectation
def expect_column_values_to_match_json_schema(
self,
column,
json_schema,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
def matches_json_schema(val):
try:
val_json = json.loads(val)
jsonschema.validate(val_json, json_schema)
# jsonschema.validate raises an error if validation fails.
# So if we make it this far, we know that the validation succeeded.
return True
except jsonschema.ValidationError:
return False
except jsonschema.SchemaError:
raise
except:
raise
return column.map(matches_json_schema)
@DocInherit
@MetaPandasDataset.column_aggregate_expectation
def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(
self,
column,
distribution,
p_value=0.05,
params=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
column = self[column]
if p_value <= 0 or p_value >= 1:
raise ValueError("p_value must be between 0 and 1 exclusive")
# Validate params
try:
validate_distribution_parameters(distribution=distribution, params=params)
except ValueError as e:
raise e
# Format arguments for scipy.kstest
if isinstance(params, dict):
positional_parameters = _scipy_distribution_positional_args_from_dict(
distribution, params
)
else:
positional_parameters = params
# K-S Test
ks_result = stats.kstest(column, distribution, args=positional_parameters)
return {
"success": ks_result[1] >= p_value,
"result": {
"observed_value": ks_result[1],
"details": {
"expected_params": positional_parameters,
"observed_ks_result": ks_result,
},
},
}
@DocInherit
@MetaPandasDataset.column_aggregate_expectation
def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(
self,
column,
partition_object=None,
p=0.05,
bootstrap_samples=None,
bootstrap_sample_size=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
column = self[column]
if not is_valid_continuous_partition_object(partition_object):
raise ValueError("Invalid continuous partition object.")
        # TODO: consider moving this check into is_valid_continuous_partition_object, so that partitions with infinite endpoints or nonzero tail_weights are rejected there
if (partition_object["bins"][0] == -np.inf) or (
partition_object["bins"][-1] == np.inf
):
raise ValueError("Partition endpoints must be finite.")
if (
"tail_weights" in partition_object
and np.sum(partition_object["tail_weights"]) > 0
):
raise ValueError(
"Partition cannot have tail weights -- endpoints must be finite."
)
test_cdf = np.append(np.array([0]), np.cumsum(partition_object["weights"]))
def estimated_cdf(x):
return np.interp(x, partition_object["bins"], test_cdf)
if bootstrap_samples is None:
bootstrap_samples = 1000
if bootstrap_sample_size is None:
# Sampling too many elements (or not bootstrapping) will make the test too sensitive to the fact that we've
# compressed via a partition.
# Sampling too few elements will make the test insensitive to significant differences, especially
# for nonoverlapping ranges.
bootstrap_sample_size = len(partition_object["weights"]) * 2
results = [
stats.kstest(
np.random.choice(column, size=bootstrap_sample_size), estimated_cdf
)[1]
for _ in range(bootstrap_samples)
]
test_result = (1 + sum(x >= p for x in results)) / (bootstrap_samples + 1)
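        # i.e. the reported value is the smoothed fraction (1 + #{bootstrap p-values >= p}) /
        # (bootstrap_samples + 1) of bootstrap K-S tests that fail to reject at level p.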
hist, bin_edges = np.histogram(column, partition_object["bins"])
below_partition = len(np.where(column < partition_object["bins"][0])[0])
above_partition = len(np.where(column > partition_object["bins"][-1])[0])
# Expand observed partition to report, if necessary
if below_partition > 0 and above_partition > 0:
observed_bins = (
[np.min(column)] + partition_object["bins"] + [np.max(column)]
)
observed_weights = np.concatenate(
([below_partition], hist, [above_partition])
) / len(column)
elif below_partition > 0:
observed_bins = [np.min(column)] + partition_object["bins"]
observed_weights = np.concatenate(([below_partition], hist)) / len(column)
elif above_partition > 0:
observed_bins = partition_object["bins"] + [np.max(column)]
observed_weights = np.concatenate((hist, [above_partition])) / len(column)
else:
observed_bins = partition_object["bins"]
observed_weights = hist / len(column)
observed_cdf_values = np.cumsum(observed_weights)
return_obj = {
"success": test_result > p,
"result": {
"observed_value": test_result,
"details": {
"bootstrap_samples": bootstrap_samples,
"bootstrap_sample_size": bootstrap_sample_size,
"observed_partition": {
"bins": observed_bins,
"weights": observed_weights.tolist(),
},
"expected_partition": {
"bins": partition_object["bins"],
"weights": partition_object["weights"],
},
"observed_cdf": {
"x": observed_bins,
"cdf_values": [0] + observed_cdf_values.tolist(),
},
"expected_cdf": {
"x": partition_object["bins"],
"cdf_values": test_cdf.tolist(),
},
},
},
}
return return_obj
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_equal(
self,
column_A,
column_B,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return column_A == column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_A_to_be_greater_than_B(
self,
column_A,
column_B,
or_equal=None,
parse_strings_as_datetimes=None,
allow_cross_type_comparisons=None,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# FIXME
if allow_cross_type_comparisons == True:
raise NotImplementedError
if parse_strings_as_datetimes:
temp_column_A = column_A.map(parse)
temp_column_B = column_B.map(parse)
else:
temp_column_A = column_A
temp_column_B = column_B
if or_equal == True:
return temp_column_A >= temp_column_B
else:
return temp_column_A > temp_column_B
@DocInherit
@MetaPandasDataset.column_pair_map_expectation
def expect_column_pair_values_to_be_in_set(
self,
column_A,
column_B,
value_pairs_set,
ignore_row_if="both_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if value_pairs_set is None:
# vacuously true
return np.ones(len(column_A), dtype=np.bool_)
temp_df = pd.DataFrame({"A": column_A, "B": column_B})
value_pairs_set = {(x, y) for x, y in value_pairs_set}
results = []
for i, t in temp_df.iterrows():
if pd.isnull(t["A"]):
a = None
else:
a = t["A"]
if pd.isnull(t["B"]):
b = None
else:
b = t["B"]
results.append((a, b) in value_pairs_set)
return pd.Series(results, temp_df.index)
def expect_multicolumn_values_to_be_unique(
self,
column_list,
mostly=None,
ignore_row_if="all_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
deprecation_warning = (
"expect_multicolumn_values_to_be_unique is being deprecated. Please use "
"expect_select_column_values_to_be_unique_within_record instead."
)
warnings.warn(
deprecation_warning,
DeprecationWarning,
)
return self.expect_select_column_values_to_be_unique_within_record(
column_list=column_list,
mostly=mostly,
ignore_row_if=ignore_row_if,
result_format=result_format,
include_config=include_config,
catch_exceptions=catch_exceptions,
meta=meta,
)
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_select_column_values_to_be_unique_within_record(
self,
column_list,
mostly=None,
ignore_row_if="all_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
threshold = len(column_list.columns)
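        # A record passes only if all of its values across the selected columns are distinct,
        # i.e. the per-row count of unique values reaches the number of columns checked.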
# Do not dropna here, since we have separately dealt with na in decorator
return column_list.nunique(dropna=False, axis=1) >= threshold
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_multicolumn_sum_to_equal(
self,
column_list,
sum_total,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
""" Multi-Column Map Expectation
        Expects that, for each row, the values across the given set of columns sum to a specific value
Args:
column_list (List[str]): \
Set of columns to be checked
sum_total (int): \
expected sum of columns
"""
return column_list.sum(axis=1) == sum_total
@DocInherit
@MetaPandasDataset.multicolumn_map_expectation
def expect_compound_columns_to_be_unique(
self,
column_list,
mostly=None,
ignore_row_if="all_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# Do not dropna here, since we have separately dealt with na in decorator
# Invert boolean so that duplicates are False and non-duplicates are True
return ~column_list.duplicated(keep=False)
| apache-2.0 |
alphaBenj/zipline | zipline/utils/calendars/exchange_calendar_cme.py | 7 | 3143 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from pandas.tseries.holiday import (
USPresidentsDay,
USLaborDay,
USThanksgivingDay,
GoodFriday
)
from pytz import timezone
# Useful resources for making changes to this file:
# http://www.cmegroup.com/tools-information/holiday-calendar.html
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
Christmas,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
USBlackFridayInOrAfter1993,
USNationalDaysofMourning,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay)
class CMEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for CME
Open Time: 5:00 PM, America/Chicago
Close Time: 5:00 PM, America/Chicago
Regularly-Observed Holidays:
- New Years Day
- Good Friday
- Christmas
"""
@property
def name(self):
return "CME"
@property
def tz(self):
return timezone('America/Chicago')
@property
def open_time(self):
return time(17, 1)
@property
def close_time(self):
return time(17)
@property
def open_offset(self):
return -1
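    # Together with the 5:01 PM open_time above, the -1 open_offset is what places each
    # session's open on the prior calendar evening (CME's overnight electronic session).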
@property
def regular_holidays(self):
# The CME has different holiday rules depending on the type of
# instrument. For example, http://www.cmegroup.com/tools-information/holiday-calendar/files/2016-4th-of-july-holiday-schedule.pdf # noqa
# shows that Equity, Interest Rate, FX, Energy, Metals & DME Products
# close at 1200 CT on July 4, 2016, while Grain, Oilseed & MGEX
# Products and Livestock, Dairy & Lumber products are completely
# closed.
# For now, we will treat the CME as having a single calendar, and just
# go with the most conservative hours - and treat July 4 as an early
# close at noon.
return HolidayCalendar([
USNewYearsDay,
GoodFriday,
Christmas,
])
@property
def adhoc_holidays(self):
return USNationalDaysofMourning
@property
def special_closes(self):
return [(
time(12),
HolidayCalendar([
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USLaborDay,
USIndependenceDay,
USThanksgivingDay,
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
])
)]
| apache-2.0 |
zfrenchee/pandas | pandas/tests/generic/test_label_or_level_utils.py | 2 | 13147 | import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas.core.dtypes.missing import array_equivalent
# Fixtures
# ========
@pytest.fixture
def df():
"""DataFrame with columns 'L1', 'L2', and 'L3' """
return pd.DataFrame({'L1': [1, 2, 3],
'L2': [11, 12, 13],
'L3': ['A', 'B', 'C']})
@pytest.fixture(params=[[], ['L1'], ['L1', 'L2'], ['L1', 'L2', 'L3']])
def df_levels(request, df):
"""DataFrame with columns or index levels 'L1', 'L2', and 'L3' """
levels = request.param
if levels:
df = df.set_index(levels)
return df
@pytest.fixture
def df_ambig(df):
"""DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3' """
df = df.set_index(['L1', 'L2'])
df['L1'] = df['L3']
return df
@pytest.fixture
def df_duplabels(df):
"""DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2' """
df = df.set_index(['L1'])
df = pd.concat([df, df['L2']], axis=1)
return df
@pytest.fixture
def panel():
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
return pd.Panel()
# Test is label/level reference
# =============================
def get_labels_levels(df_levels):
expected_labels = list(df_levels.columns)
expected_levels = [name for name in df_levels.index.names
if name is not None]
return expected_labels, expected_levels
def assert_label_reference(frame, labels, axis):
for label in labels:
assert frame._is_label_reference(label, axis=axis)
assert not frame._is_level_reference(label, axis=axis)
assert frame._is_label_or_level_reference(label, axis=axis)
def assert_level_reference(frame, levels, axis):
for level in levels:
assert frame._is_level_reference(level, axis=axis)
assert not frame._is_label_reference(level, axis=axis)
assert frame._is_label_or_level_reference(level, axis=axis)
# DataFrame
# ---------
@pytest.mark.parametrize('axis', [0, 1])
def test_is_level_or_label_reference_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
if axis == 1:
df_levels = df_levels.T
# Perform checks
assert_level_reference(df_levels, expected_levels, axis=axis)
assert_label_reference(df_levels, expected_labels, axis=axis)
@pytest.mark.parametrize('axis', [0, 1])
def test_is_level_reference_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
if axis == 1:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
# Therefore L1 should reference the label, not the level
assert_label_reference(df_ambig, ['L1'], axis=axis)
# df has an on-axis level named L2 and it is not ambiguous
# Therefore L2 is an level reference
assert_level_reference(df_ambig, ['L2'], axis=axis)
# df has a column named L3 and it not an level reference
assert_label_reference(df_ambig, ['L3'], axis=axis)
# Series
# ------
def test_is_level_reference_series_simple_axis0(df):
# Make series with L1 as index
s = df.set_index('L1').L2
assert_level_reference(s, ['L1'], axis=0)
assert not s._is_level_reference('L2')
# Make series with L1 and L2 as index
s = df.set_index(['L1', 'L2']).L3
assert_level_reference(s, ['L1', 'L2'], axis=0)
assert not s._is_level_reference('L3')
def test_is_level_reference_series_axis1_error(df):
# Make series with L1 as index
s = df.set_index('L1').L2
with tm.assert_raises_regex(ValueError, "No axis named 1"):
s._is_level_reference('L1', axis=1)
# Panel
# -----
def test_is_level_reference_panel_error(panel):
msg = ("_is_level_reference is not implemented for {type}"
.format(type=type(panel)))
with tm.assert_raises_regex(NotImplementedError, msg):
panel._is_level_reference('L1', axis=0)
def test_is_label_reference_panel_error(panel):
msg = ("_is_label_reference is not implemented for {type}"
.format(type=type(panel)))
with tm.assert_raises_regex(NotImplementedError, msg):
panel._is_label_reference('L1', axis=0)
def test_is_label_or_level_reference_panel_error(panel):
msg = ("_is_label_or_level_reference is not implemented for {type}"
.format(type=type(panel)))
with tm.assert_raises_regex(NotImplementedError, msg):
panel._is_label_or_level_reference('L1', axis=0)
# Test _check_label_or_level_ambiguity_df
# =======================================
# DataFrame
# ---------
@pytest.mark.parametrize('axis', [0, 1])
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
# Transpose frame if axis == 1
if axis == 1:
df_ambig = df_ambig.T
# df_ambig has both an on-axis level and off-axis label named L1
# Therefore L1 is ambiguous
with tm.assert_produces_warning(FutureWarning,
clear=True,
check_stacklevel=False) as w:
assert df_ambig._check_label_or_level_ambiguity('L1', axis=axis)
warning_msg = w[0].message.args[0]
if axis == 0:
assert warning_msg.startswith("'L1' is both an index level "
"and a column label")
else:
assert warning_msg.startswith("'L1' is both a column level "
"and an index label")
# df_ambig has an on-axis level named L2 and it is not ambiguous
# No warning should be raised
with tm.assert_produces_warning(None):
assert not df_ambig._check_label_or_level_ambiguity('L2', axis=axis)
# df_ambig has an off-axis label named L3 and it is not ambiguous
with tm.assert_produces_warning(None):
assert not df_ambig._is_level_reference('L3', axis=axis)
# Series
# ------
def test_check_label_or_level_ambiguity_series(df):
# A series has no columns and therefore references are never ambiguous
# Make series with L1 as index
s = df.set_index('L1').L2
with tm.assert_produces_warning(None):
assert not s._check_label_or_level_ambiguity('L1', axis=0)
assert not s._check_label_or_level_ambiguity('L2', axis=0)
# Make series with L1 and L2 as index
s = df.set_index(['L1', 'L2']).L3
with tm.assert_produces_warning(None):
assert not s._check_label_or_level_ambiguity('L1', axis=0)
assert not s._check_label_or_level_ambiguity('L2', axis=0)
assert not s._check_label_or_level_ambiguity('L3', axis=0)
def test_check_label_or_level_ambiguity_series_axis1_error(df):
# Make series with L1 as index
s = df.set_index('L1').L2
with tm.assert_raises_regex(ValueError, "No axis named 1"):
s._check_label_or_level_ambiguity('L1', axis=1)
# Panel
# -----
def test_check_label_or_level_ambiguity_panel_error(panel):
msg = ("_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(panel)))
with tm.assert_raises_regex(NotImplementedError, msg):
panel._check_label_or_level_ambiguity('L1', axis=0)
# Test _get_label_or_level_values
# ===============================
def assert_label_values(frame, labels, axis):
for label in labels:
if axis == 0:
expected = frame[label]._values
else:
expected = frame.loc[label]._values
result = frame._get_label_or_level_values(label, axis=axis)
assert array_equivalent(expected, result)
def assert_level_values(frame, levels, axis):
for level in levels:
if axis == 0:
expected = frame.index.get_level_values(level=level)._values
else:
expected = (frame.columns
.get_level_values(level=level)
._values)
result = frame._get_label_or_level_values(level, axis=axis)
assert array_equivalent(expected, result)
# DataFrame
# ---------
@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_simple(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
if axis == 1:
df_levels = df_levels.T
# Perform checks
assert_label_values(df_levels, expected_labels, axis=axis)
assert_level_values(df_levels, expected_levels, axis=axis)
@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_ambig(df_ambig, axis):
# Transpose frame if axis == 1
if axis == 1:
df_ambig = df_ambig.T
# df has both an on-axis level and off-axis label named L1
# Therefore L1 is ambiguous but will default to label
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert_label_values(df_ambig, ['L1'], axis=axis)
# df has an on-axis level named L2 and it is not ambiguous
with tm.assert_produces_warning(None):
assert_level_values(df_ambig, ['L2'], axis=axis)
# df has an off-axis label named L3 and it is not ambiguous
with tm.assert_produces_warning(None):
assert_label_values(df_ambig, ['L3'], axis=axis)
@pytest.mark.parametrize('axis', [0, 1])
def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
# Transpose frame if axis == 1
if axis == 1:
df_duplabels = df_duplabels.T
# df has unambiguous level 'L1'
assert_level_values(df_duplabels, ['L1'], axis=axis)
# df has unique label 'L3'
assert_label_values(df_duplabels, ['L3'], axis=axis)
# df has duplicate labels 'L2'
if axis == 0:
expected_msg = "The column label 'L2' is not unique"
else:
expected_msg = "The index label 'L2' is not unique"
with tm.assert_raises_regex(ValueError, expected_msg):
assert_label_values(df_duplabels, ['L2'], axis=axis)
# Series
# ------
def test_get_label_or_level_values_series_axis0(df):
# Make series with L1 as index
s = df.set_index('L1').L2
assert_level_values(s, ['L1'], axis=0)
# Make series with L1 and L2 as index
s = df.set_index(['L1', 'L2']).L3
assert_level_values(s, ['L1', 'L2'], axis=0)
def test_get_label_or_level_values_series_axis1_error(df):
# Make series with L1 as index
s = df.set_index('L1').L2
with tm.assert_raises_regex(ValueError, "No axis named 1"):
s._get_label_or_level_values('L1', axis=1)
# Panel
# -----
def test_get_label_or_level_values_panel_error(panel):
msg = ("_get_label_or_level_values is not implemented for {type}"
.format(type=type(panel)))
with tm.assert_raises_regex(NotImplementedError, msg):
panel._get_label_or_level_values('L1', axis=0)
# Test _drop_labels_or_levels
# ===========================
def assert_labels_dropped(frame, labels, axis):
for label in labels:
df_dropped = frame._drop_labels_or_levels(label, axis=axis)
if axis == 0:
assert label in frame.columns
assert label not in df_dropped.columns
else:
assert label in frame.index
assert label not in df_dropped.index
def assert_levels_dropped(frame, levels, axis):
for level in levels:
df_dropped = frame._drop_labels_or_levels(level, axis=axis)
if axis == 0:
assert level in frame.index.names
assert level not in df_dropped.index.names
else:
assert level in frame.columns.names
assert level not in df_dropped.columns.names
# DataFrame
# ---------
@pytest.mark.parametrize('axis', [0, 1])
def test_drop_labels_or_levels_df(df_levels, axis):
# Compute expected labels and levels
expected_labels, expected_levels = get_labels_levels(df_levels)
# Transpose frame if axis == 1
if axis == 1:
df_levels = df_levels.T
# Perform checks
assert_labels_dropped(df_levels, expected_labels, axis=axis)
assert_levels_dropped(df_levels, expected_levels, axis=axis)
with tm.assert_raises_regex(ValueError, "not valid labels or levels"):
df_levels._drop_labels_or_levels('L4', axis=axis)
# Series
# ------
def test_drop_labels_or_levels_series(df):
# Make series with L1 as index
s = df.set_index('L1').L2
assert_levels_dropped(s, ['L1'], axis=0)
with tm.assert_raises_regex(ValueError, "not valid labels or levels"):
s._drop_labels_or_levels('L4', axis=0)
# Make series with L1 and L2 as index
s = df.set_index(['L1', 'L2']).L3
assert_levels_dropped(s, ['L1', 'L2'], axis=0)
with tm.assert_raises_regex(ValueError, "not valid labels or levels"):
s._drop_labels_or_levels('L4', axis=0)
# Panel
# -----
def test_drop_labels_or_levels_panel_error(panel):
msg = ("_drop_labels_or_levels is not implemented for {type}"
.format(type=type(panel)))
with tm.assert_raises_regex(NotImplementedError, msg):
panel._drop_labels_or_levels('L1', axis=0)
| bsd-3-clause |
leal26/AeroPy | aeropy/morphing/camber_2D.py | 2 | 14358 | # -*- coding: utf-8 -*-
"""
Objective: create an airfoil with a leading edge restriction, same upper length
restriction, orthogonal upper spars and constant thicknesses in four places
Created on Mon Oct 17 10:36:34 2016
@author: Pedro
"""
from __future__ import print_function
import os
import math
import numpy as np
from numpy.linalg import inv
from aeropy.geometry.airfoil import CST
from aeropy.CST_2D import *
# As a quick trick, to produce the upper-surface morph I just mirror the image with respect to x
inverted = False
# Defines whether backwards or forwards morphing is used
morphing_direction = 'forwards'
# ==============================================================================
# Calculate dependent shape function parameters
# ==============================================================================
def calculate_dependent_shape_coefficients(Au_C_1_to_n,
psi_spars, Au_P, Al_P, deltaz, c_P,
morphing='backwards'):
"""Calculate dependent shape coefficients for children configuration for a 4 order
Bernstein polynomial and return the children upper, lower shape
coefficients, children chord and spar thicknesses. _P denotes parent parameters"""
def calculate_AC_u0(AC_u0):
Au_C = [AC_u0] + Au_C_1_to_n
c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz)
return np.sqrt(c_P/c_C)*Au_P[0]
    # Bernstein Polynomial
def K(r, n):
K = math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
return K
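    # (K(r, n) is the binomial coefficient C(n, r), i.e. the weight of the r-th Bernstein
    # basis term K(r, n) * psi**r * (1 - psi)**(n - r) used to build the systems below.)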
# Bernstein Polynomial order
n = len(Au_C_1_to_n)
    # Find the upper shape coefficient through an iterative method, since Au_0 is unknown,
    # via fixed point iteration
#AC_u0 = optimize.fixed_point(calculate_AC_u0, Au_P[0])
# print AC_u0
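    # The loop below is a plain fixed-point iteration: starting from the parent value, it
    # repeatedly applies AC_u0 <- sqrt(c_P / c_C(AC_u0)) * Au_P[0], where c_C depends on
    # AC_u0 through calculate_c_baseline, until successive iterates differ by less than 1e-9.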
error = 9999
AC_u0 = Au_P[0]
while error > 1e-9:
before = AC_u0
AC_u0 = calculate_AC_u0(AC_u0)
error = abs(AC_u0 - before)
# Because the output is an array, need the extra [0]
Au_C = [AC_u0] + Au_C_1_to_n
# Now that AC_u0 is known we can calculate the actual chord and AC_l0
c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz)
AC_l0 = np.sqrt(c_P/c_C)*Al_P[0]
# print(Au_C)
# print(Au_P)
# print(Al_P)
# print(c_C, AC_l0, AC_u0)
# print '0 lower shape coefficient: ',AC_l0
# Calculate thicknessed and tensor B for the constraint linear system problem
spar_thicknesses = []
A0 = AC_u0 + AC_l0
if morphing == 'backwards':
b_list = np.zeros((n, 1))
for j in range(len(psi_spars)):
psi_j = psi_spars[j]
            # Calculate the spar thickness in meters from the parent; afterwards it needs to
            # be non-dimensionalized for the goal airfoil by dividing by c_goal
t_j = calculate_spar_distance(psi_spars[j], Au_C, Au_P, Al_P, deltaz, c_P)
spar_thicknesses.append(t_j)
b_list[j] = (t_j/c_C - psi_j*deltaz/c_C)/((psi_j**0.5)*(1-psi_j)) - A0*(1-psi_j)**n
B = np.zeros((n, n))
# j is the row dimension and i the column dimension in this case
for j in range(n):
for i in range(n):
# Because in Python counting starts at 0, need to add 1 to be
# coherent for equations
r = i + 1
B[j][i] = K(r, n)*(psi_spars[j]**r)*(1-psi_spars[j])**(n-r)
A_bar = np.dot(inv(B), b_list)
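        # A_bar solves B * A_bar = b_list; each A_bar[i] is the coefficient sum
        # Au_C[i+1] + Al_C[i+1] enforced by the spar-thickness constraints, so the lower
        # coefficients follow by subtracting the known upper ones.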
Al_C = [AC_l0]
for i in range(len(A_bar)):
Al_C.append(A_bar[i][0] - Au_C[i+1]) # extra [0] is necessary because of array
elif morphing == 'forwards':
f = np.zeros((n, 1))
# psi/xi coordinates for lower surface of the children configuration
psi_lower_children = []
xi_lower_children = []
xi_upper_children = []
c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz)
# print(c_C, AC_u0, AC_l0)
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_upper_children = []
for j in range(len(psi_spars)):
psi_upper_children.append(calculate_psi_goal(psi_spars[j], Au_P, Au_C, deltaz,
c_P, c_C))
        # Calculate xi for the upper surface of the children. The lower surface does not matter here, so arbitrary shape coefficients (Au_C) are reused for it
xi_upper_children = CST(psi_upper_children, 1., deltasz=[
deltaz/2./c_C, deltaz/2./c_C], Al=Au_C, Au=Au_C)
xi_upper_children = xi_upper_children['u']
# print xi_upper_children
# Debugging section
x = np.linspace(0, 1)
y = CST(x, 1., deltasz=[deltaz/2./c_C, deltaz/2./c_C], Al=Au_C, Au=Au_C)
# plt.plot(x,y['u'])
# plt.scatter(psi_upper_children, xi_upper_children)
# plt.grid()
# plt.show()
# BREAK
for j in range(len(psi_spars)):
xi_parent = CST(psi_spars, 1., deltasz=[
deltaz/2./c_P, deltaz/2./c_P], Al=Al_P, Au=Au_P)
delta_j_P = xi_parent['u'][j]-xi_parent['l'][j]
t_j = c_P*(delta_j_P)
            # Calculate spar orientation for the children configuration
s_j = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C)
psi_l_j = psi_upper_children[j]-delta_j_P/c_C*s_j[0]
xi_l_j = xi_upper_children[j]-delta_j_P/c_C*s_j[1]
spar_thicknesses.append(t_j)
psi_lower_children.append(psi_l_j)
xi_lower_children.append(xi_l_j)
f[j] = (2*xi_l_j + psi_l_j*deltaz/c_C) / \
(2*(psi_l_j**0.5)*(psi_l_j-1)) - AC_l0*(1-psi_l_j)**n
F = np.zeros((n, n))
# j is the row dimension and i the column dimension in this case
for j in range(n):
for i in range(n):
# Because in Python counting starts at 0, need to add 1 to be
# coherent for equations
r = i + 1
F[j][i] = K(r, n)*(psi_lower_children[j]**r)*(1-psi_lower_children[j])**(n-r)
A_lower = np.dot(inv(F), f)
Al_C = [AC_l0]
for i in range(len(A_lower)):
Al_C.append(A_lower[i][0]) # extra [0] is necessary because of array
return Au_C, Al_C, c_C, spar_thicknesses
def calculate_shape_coefficients_tracing(A0, x, y, N1, N2, chord=1., EndThickness=0):
"""
    inputs:
    - x, y: coordinates of the points to trace (e.g. a tip displacement and other measured points)
    - A0: float value for first shape coefficient. Usually related to a constraint.
"""
    # Bernstein Polynomial
def K(r, n):
K = math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
return K
Psi = np.array(x)/chord
Xi = np.array(y)/chord
EndThickness = EndThickness/chord
# If A0 is given (constant leading edge assumption)
if A0 is not None:
n = len(x)
T = np.zeros((n, n))
t = np.zeros((n, 1))
for j in range(1, n+1):
jj = j - 1
for i in range(1, n+1):
ii = i - 1
T[jj][ii] = K(i, n) * Psi[jj]**i * (1-Psi[jj])**(n-i)
t[jj] = (Xi[jj] - Psi[jj]*EndThickness) / \
(Psi[jj]**N1*(1-Psi[jj])**N2) - A0*(1-Psi[jj])**n
# Calculate the inverse
A = np.dot(inv(T), t)
A = [A0] + list(A.transpose()[0])
# If A0 is unknown
else:
n = len(x) - 1
T = np.zeros((n+1, n+1))
t = np.zeros((n+1, 1))
for j in range(n+1):
for i in range(n+1):
T[j][i] = K(i, n) * Psi[j]**i * (1-Psi[j])**(n-i)
t[j] = (Xi[j] - Psi[j]*EndThickness)/(Psi[j]**N1*(1-Psi[j])**N2)
# Calculate the inverse
A = np.dot(inv(T), t)
A = list(A.transpose()[0])
return A
def calculate_strains(Au_P, Al_P, c_P, Au_C, Al_C, c_C, deltaz, psi_spars, spar_thicknesses):
    # Calculate psi_flats (non-dimensional location of the intersection of
    # the spars with the lower surface)
psi_flats = []
for j in range(len(psi_spars)):
psi_parent_j = psi_spars[j]
# Calculate psi at landing
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_children_j = calculate_psi_goal(psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C)
x_children_j = psi_children_j*c_C
s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C)
psi_flats.append(x_children_j - spar_thicknesses[j]*s[0])
# Calculate initial lengths
initial_lengths = []
psi_list = [0.] + psi_spars + [c_P]
for i in range(len(psi_list)-1):
initial_lengths.append(calculate_arc_length(psi_list[i], psi_list[i+1], Al_P, deltaz, c_P))
# Calculate final lengths
final_lengths = []
psi_list = [0.] + psi_flats + [c_C] # In P configuration
for i in range(len(psi_list)-1):
final_lengths.append(calculate_arc_length(
psi_list[i]*c_P/c_C, psi_list[i+1]*c_P/c_C, Al_C, deltaz, c_C))
# Calculate strains
strains = []
for i in range(len(final_lengths)):
strains.append((final_lengths[i]-initial_lengths[i])/initial_lengths[i])
av_strain = (sum(final_lengths)-sum(initial_lengths))/sum(initial_lengths)
# for i in range(len(strains)):
# print 'Initial length: ' + str(initial_lengths[i]) + ', final length: ' + str(final_lengths[i]) + ', strains: ' + str(strains[i])
return strains, av_strain
def plot_airfoil(AC, psi_spars, c_P, deltaz, Au_P, Al_P, image='plot',
iteration=0, return_coordinates=True, dir='current',
morphing_direction='backwards'):
import matplotlib.pyplot as plt
# plt.figure()
n = len(Au_P) - 1
Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
AC,
psi_spars, Au_P, Al_P,
deltaz, c_P, morphing=morphing_direction)
print('CST chord', c_C)
# ==============================================================================
# Plot results
# ==============================================================================
np.set_printoptions(precision=20)
x = np.linspace(0, c_C, 1000)
y = CST(x, c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
plt.plot(x, y['u'], 'b', label='Children', lw=1)
plt.plot(x, y['l'], '-b', label=None, lw=1)
# store variables in case return_coordinates is True
x = list(x[::-1]) + list(x[1:])
y = list(y['u'][::-1]) + list(y['l'][1:])
children_coordinates = {'x': x, 'y': y}
x = np.linspace(0, c_P, 1000)
y = CST(x, c_P, deltasz=[deltaz/2., deltaz/2.], Al=Al_P, Au=Au_P)
plt.plot(x, y['u'], 'r--', label='Parent', lw=1)
plt.plot(x, y['l'], 'r--', label=None, lw=1)
y_limits = y
if morphing_direction == 'backwards':
for i in range(len(psi_spars)):
psi_i = psi_spars[i]
# Calculate psi at landing
psi_goal_i = calculate_psi_goal(psi_i, Au_C, Au_P, deltaz, c_C, c_P)
x_goal_i = psi_goal_i*c_P
# Calculate xi at landing
temp = CST(x_goal_i, c_P, [deltaz/2., deltaz/2.], Al=Al_P, Au=Au_P)
y_goal_i = temp['u']
# calculate spar direction
s = calculate_spar_direction(psi_i, Au_C, Au_P, deltaz, c_P)
plt.plot([x_goal_i, x_goal_i - spar_thicknesses[i]*s[0]],
[y_goal_i, y_goal_i - spar_thicknesses[i]*s[1]], 'r--')
y = CST(np.array([psi_i*c_C]), c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
plt.plot([psi_i*c_C, psi_i*c_C], [y['u'], y['u']-spar_thicknesses[i]], 'b', label=None)
elif morphing_direction == 'forwards':
for j in range(len(psi_spars)):
psi_parent_j = psi_spars[j]
# Calculate psi at landing
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_children_j = calculate_psi_goal(psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C)
x_children_j = psi_children_j*c_C
# Calculate xi at landing
temp = CST(x_children_j, c_C, [deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
y_children_j = temp['u']
s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C)
# Print spars for children
if not inverted:
plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]], [y_children_j,
y_children_j - spar_thicknesses[j]*s[1]], c='b', lw=1, label=None)
else:
plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]],
[-y_children_j, -y_children_j + spar_thicknesses[j]*s[1]], c='b', lw=1, label=None)
y = CST(np.array([psi_parent_j*c_P]), c_P,
deltasz=[deltaz/2., deltaz/2.], Al=Al_P, Au=Au_P)
# Print spars for parents
if not inverted:
plt.plot([psi_parent_j*c_P, psi_parent_j*c_P],
[y['u'], y['u']-spar_thicknesses[j]], 'r--', lw=1, label=None)
else:
plt.plot([psi_parent_j*c_P, psi_parent_j*c_P], [-y['u'], -
y['u']+spar_thicknesses[j]], 'r--', lw=1, label=None)
    plt.xlabel(r'$\psi$', fontsize=16)
plt.ylabel(r'$\xi$', fontsize=16)
plt.grid()
plt.legend(loc="upper right")
plt.gca().set_aspect(2, adjustable='box')
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, 2*y2))
# plt.axis([-0.005, c_L+0.005, min(y_limits['l'])-0.005, max(y_limits['l'])+0.01])
if image == 'plot':
plt.show()
elif image == 'save':
if dir == 'current':
plt.savefig('%03i.pdf' % (iteration), bbox_inches='tight')
else:
cwd = os.getcwd()
directory = os.path.join(cwd, dir)
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, '%05i.png' % (iteration))
plt.savefig(filename, bbox_inches='tight')
if return_coordinates:
return children_coordinates
| mit |
Ensembles/ert | python/python/ert_gui/plottery/plots/gaussian_kde.py | 4 | 1916 | import numpy
from scipy.stats import gaussian_kde
from .plot_tools import PlotTools
import pandas as pd
def plotGaussianKDE(plot_context):
"""
@type plot_context: ert_gui.plottery.PlotContext
"""
ert = plot_context.ert()
key = plot_context.key()
config = plot_context.plotConfig()
axes = plot_context.figure().add_subplot(111)
""":type: matplotlib.axes.Axes """
plot_context.deactivateDateSupport()
plot_context.x_axis = plot_context.VALUE_AXIS
plot_context.y_axis = plot_context.DENSITY_AXIS
if key.startswith("LOG10_"):
key = key[6:]
axes.set_xscale("log")
case_list = plot_context.cases()
for case in case_list:
data = plot_context.dataGatherer().gatherData(ert, case, key)
if not data.empty and data.nunique() > 1:
_plotGaussianKDE(axes, config, data, case)
config.nextColor()
PlotTools.finalizePlot(plot_context, axes, default_x_label="Value", default_y_label="Density")
def _plotGaussianKDE(axes, plot_config, data, label):
"""
@type axes: matplotlib.axes.Axes
@type plot_config: PlotConfig
@type data: DataFrame
@type label: Str
"""
style = plot_config.histogramStyle()
if data.dtype == "object":
try:
data = pd.to_numeric(data, errors='coerce')
except AttributeError:
data = data.convert_objects(convert_numeric=True)
if data.dtype == "object":
pass
else:
sample_range = data.max() - data.min()
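        # Pad the evaluation grid by half the sample range on each side so the KDE tails
        # are not clipped at the data extremes.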
indexes = numpy.linspace(data.min() - 0.5 * sample_range, data.max() + 0.5 * sample_range, 1000)
gkde = gaussian_kde(data.values)
evaluated_gkde = gkde.evaluate(indexes)
lines = axes.plot(indexes, evaluated_gkde, linewidth=style.width, color=style.color, alpha=style.alpha)
if len(lines) > 0:
plot_config.addLegendItem(label, lines[0]) | gpl-3.0 |
hsuantien/scikit-learn | sklearn/utils/multiclass.py | 92 | 13986 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-sequences': _unique_sequence_of_sequence,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %r" % ys)
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
""" Check if ``y`` is in the label indicator matrix format (multilabel).
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a label indicator matrix format,
else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_label_indicator_matrix
>>> is_label_indicator_matrix([0, 1, 0, 1])
False
>>> is_label_indicator_matrix([[1], [0, 2], []])
False
>>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
True
>>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
False
>>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def is_sequence_of_sequences(y):
""" Check if ``y`` is in the sequence of sequences format (multilabel).
This format is DEPRECATED.
Parameters
----------
y : sequence or array.
Returns
-------
out : bool,
Return ``True``, if ``y`` is a sequence of sequences else ``False``.
"""
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
try:
if hasattr(y, '__array__'):
y = np.asarray(y)
out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types))
except (IndexError, TypeError):
return False
if out:
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
return out
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-sequences': `y` is a sequence of sequences, a 1d
array-like of objects that are sequences of labels.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_sequence_of_sequences(y):
return 'multilabel-sequences'
elif is_label_indicator_matrix(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# known to fail in numpy 1.3 for array of arrays
return 'unknown'
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown'
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown'
elif y.ndim == 2 and y.shape[1] > 1:
suffix = '-multioutput'
else:
# column vector or 1d
suffix = ''
# check float and contains non-integer float values:
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'continuous' + suffix
if len(np.unique(y)) <= 2:
assert not suffix, "2d binary array-like should be multilabel"
return 'binary'
else:
return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
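# A minimal sketch (hypothetical estimator) of how this helper is meant to be
# wired into ``partial_fit``: pass ``classes`` on the first call, after which
# the stored ``classes_`` attribute is validated on every later call.
def _partial_fit_first_call_sketch():
    class _DummyClf(object):
        def partial_fit(self, X, y, classes=None):
            if _check_partial_fit_first_call(self, classes):
                # first call: self.classes_ has just been set from ``classes``
                pass
            return self
    clf = _DummyClf()
    clf.partial_fit([[0.], [1.]], [0, 1], classes=[0, 1])
    clf.partial_fit([[2.]], [1])  # ``classes`` may be omitted afterwards
    return clf.classes_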
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
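# A minimal usage sketch (toy data): on a dense two-output target,
# class_distribution returns per-column classes, class counts and priors.
def _class_distribution_sketch():
    y = np.array([[1, 0],
                  [2, 0],
                  [1, 3]])
    classes, n_classes, class_prior = class_distribution(y)
    # classes     -> [array([1, 2]), array([0, 3])]
    # n_classes   -> [2, 2]
    # class_prior -> [array([ 0.667,  0.333]), array([ 0.667,  0.333])]
    return classes, n_classes, class_prior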
| bsd-3-clause |
ricorx7/donkey | donkeycar/templates/donkey2.py | 2 | 8737 | #!/usr/bin/env python3
"""
Scripts to drive a donkey 2 car and train a model for it.
Usage:
manage.py (drive) [--model=<model>] [--js]
manage.py (train) [--tub=<tub1,tub2,..tubn>] (--model=<model>)
manage.py (calibrate)
manage.py (check) [--tub=<tub1,tub2,..tubn>] [--fix]
manage.py (analyze) [--tub=<tub1,tub2,..tubn>] (--op=<histogram>) (--rec=<"user/angle">)
Options:
-h --help Show this screen.
--js Use physical joystick.
--fix Remove records which cause problems.
"""
import os
from docopt import docopt
import donkeycar as dk
def drive(cfg, model_path=None, use_joystick=False):
'''
Construct a working robotic vehicle from many parts.
Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
All parts are updated one after another at the framerate given in
cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
Parts may have named outputs and inputs. The framework handles passing named outputs
to parts requesting the same named input.
'''
#Initialize car
V = dk.vehicle.Vehicle()
cam = dk.parts.PiCamera(resolution=cfg.CAMERA_RESOLUTION)
V.add(cam, outputs=['cam/image_array'], threaded=True)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
ctr = dk.parts.JoystickController(max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
else:
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = dk.parts.LocalWebController()
V.add(ctr,
inputs=['cam/image_array'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
#See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts booleans
def pilot_condition(mode):
if mode == 'user':
return False
else:
return True
pilot_condition_part = dk.parts.Lambda(pilot_condition)
V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])
#Run the pilot if the mode is not user.
kl = dk.parts.KerasCategorical()
if model_path:
kl.load(model_path)
V.add(kl, inputs=['cam/image_array'],
outputs=['pilot/angle', 'pilot/throttle'],
run_condition='run_pilot')
#Choose what inputs should change the car.
def drive_mode(mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle, user_throttle
else:
return pilot_angle, pilot_throttle
drive_mode_part = dk.parts.Lambda(drive_mode)
V.add(drive_mode_part,
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
steering_controller = dk.parts.PCA9685(cfg.STEERING_CHANNEL)
steering = dk.parts.PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = dk.parts.PCA9685(cfg.THROTTLE_CHANNEL)
throttle = dk.parts.PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
#add tub to save data
inputs=['cam/image_array',
'user/angle', 'user/throttle',
'user/mode']
types=['image_array',
'float', 'float',
'str']
th = dk.parts.TubHandler(path=cfg.DATA_PATH)
tub = th.new_tub_writer(inputs=inputs, types=types)
V.add(tub, inputs=inputs, run_condition='recording')
    #run the vehicle
V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
max_loop_count=cfg.MAX_LOOPS)
print("You can now go to <your pi ip address>:8887 to drive your car.")
def expand_path_masks(paths):
'''
take a list of paths and expand any wildcards
returns a new list of paths fully expanded
'''
import glob
expanded_paths = []
for path in paths:
if '*' in path or '?' in path:
mask_paths = glob.glob(path)
expanded_paths += mask_paths
else:
expanded_paths.append(path)
return expanded_paths
def gather_tubs(cfg, tub_names):
if tub_names:
tub_paths = [os.path.expanduser(n) for n in tub_names.split(',')]
tub_paths = expand_path_masks(tub_paths)
else:
tub_paths = [os.path.join(cfg.DATA_PATH, n) for n in os.listdir(cfg.DATA_PATH)]
tubs = [dk.parts.Tub(p) for p in tub_paths]
return tubs
def train(cfg, tub_names, model_name):
'''
    use the specified data in tub_names to train an artificial neural network
saves the output trained model as model_name
'''
X_keys = ['cam/image_array']
y_keys = ['user/angle', 'user/throttle']
def rt(record):
record['user/angle'] = dk.utils.linear_bin(record['user/angle'])
return record
kl = dk.parts.KerasCategorical()
tubs = gather_tubs(cfg, tub_names)
import itertools
gens = [tub.train_val_gen(X_keys, y_keys, record_transform=rt, batch_size=cfg.BATCH_SIZE, train_split=cfg.TRAIN_TEST_SPLIT) for tub in tubs]
    # The training data generator keeps cycling through the training generators of all tubs chained together
    # The same applies to the validation generator
train_gens = itertools.cycle(itertools.chain(*[gen[0] for gen in gens]))
val_gens = itertools.cycle(itertools.chain(*[gen[1] for gen in gens]))
model_path = os.path.expanduser(model_name)
total_records = sum([t.get_num_records() for t in tubs])
total_train = int(total_records * cfg.TRAIN_TEST_SPLIT)
total_val = total_records - total_train
print('train: %d, validation: %d' %(total_train, total_val))
steps_per_epoch = total_train // cfg.BATCH_SIZE
print('steps_per_epoch', steps_per_epoch)
kl.train(train_gens,
val_gens,
saved_model_path=model_path,
steps=steps_per_epoch,
train_split=cfg.TRAIN_TEST_SPLIT)
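# A small self-contained sketch of the generator pattern used in train() above:
# chain the per-tub generators together, then cycle over the combined stream so
# the fit call can keep drawing batches indefinitely.
def _chained_generator_sketch():
    import itertools
    tub_a = (batch for batch in ['a1', 'a2'])
    tub_b = (batch for batch in ['b1', 'b2'])
    combined = itertools.cycle(itertools.chain(tub_a, tub_b))
    return [next(combined) for _ in range(6)]  # ['a1', 'a2', 'b1', 'b2', 'a1', 'a2']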
def calibrate():
channel = int(input('Enter the channel your actuator uses (0-15).'))
c = dk.parts.PCA9685(channel)
for i in range(10):
        pwm = int(input('Enter a PWM setting to test (100-600)'))
        c.run(pwm)
def check(cfg, tub_names, fix=False):
'''
    Check for any problems. Looks at tubs and finds problems in any records or images that won't open.
If fix is True, then delete images and records that cause problems.
'''
tubs = gather_tubs(cfg, tub_names)
for tub in tubs:
tub.check(fix=fix)
def analyze(cfg, tub_names, op, record):
'''
look at the tub data and produce some analysis
'''
tubs = gather_tubs(cfg, tub_names)
if op == 'histogram':
import matplotlib.pyplot as plt
samples = []
for tub in tubs:
num_records = tub.get_num_records()
for iRec in range(0, num_records):
json_data = tub.get_json_record(iRec)
sample = json_data[record]
samples.append(float(sample))
plt.hist(samples, 50)
plt.xlabel(record)
plt.show()
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config()
if args['drive']:
drive(cfg, model_path = args['--model'], use_joystick=args['--js'])
elif args['calibrate']:
calibrate()
elif args['train']:
tub = args['--tub']
model = args['--model']
train(cfg, tub, model)
elif args['check']:
tub = args['--tub']
fix = args['--fix']
check(cfg, tub, fix)
elif args['analyze']:
tub = args['--tub']
op = args['--op']
rec = args['--rec']
        analyze(cfg, tub, op, rec)
| mit |
zingale/hydro_examples | compressible/MOL/python/euler_mol.py | 1 | 8665 | import numpy as np
import matplotlib.pyplot as plt
import riemann
URHO = 0
UMX = 1
UENER = 2
QRHO = 0
QU = 1
QP = 2
NVAR = 3
class FVGrid(object):
def __init__(self, nx, ng, xmin=0.0, xmax=1.0, bcs="outflow"):
self.xmin = xmin
self.xmax = xmax
self.ng = ng
self.nx = nx
self.bcs = bcs
        # python is zero-based. Make easy integers to know where the
# real data lives
self.ilo = ng
self.ihi = ng+nx-1
# physical coords -- cell-centered, left and right edges
self.dx = (xmax - xmin)/(nx)
self.x = xmin + (np.arange(nx+2*ng)-ng+0.5)*self.dx
self.xl = xmin + (np.arange(nx+2*ng)-ng)*self.dx
self.xr = xmin + (np.arange(nx+2*ng)-ng+1.0)*self.dx
def scratch_array(self, nc=1):
""" return a scratch array dimensioned for our grid """
if nc == 1:
return np.zeros((self.nx+2*self.ng), dtype=np.float64)
else:
return np.zeros((self.nx+2*self.ng, nc), dtype=np.float64)
def norm(self, e):
""" return the norm of quantity e which lives on the grid """
if not len(e) == (2*self.ng + self.nx):
return None
return np.sqrt(self.dx*np.sum(e[self.ilo:self.ihi+1]**2))
def fill_BCs(self, atmp):
""" fill all single ghostcell with periodic boundary conditions """
try:
nc = atmp.shape[1]
except:
nc = 1
# outflow
if self.bcs == "outflow":
if nc == 1:
atmp[0:self.ilo] = atmp[self.ilo]
atmp[self.ihi+1:] = atmp[self.ihi]
else:
for n in range(nc):
atmp[0:self.ilo, n] = atmp[self.ilo, n]
atmp[self.ihi+1:, n] = atmp[self.ihi, n]
elif self.bcs == "periodic":
if nc == 1:
for i in range(self.ng):
atmp[self.ilo-1-i] = atmp[self.ihi-i]
atmp[self.ihi+1+i] = atmp[self.ilo+i]
else:
for n in range(nc):
for i in range(self.ng):
atmp[self.ilo-1-i, n] = atmp[self.ihi-i, n]
atmp[self.ihi+1+i, n] = atmp[self.ilo+i, n]
class Simulation(object):
def __init__(self, nx, params):
self.params = params
try:
bcs = params["bcs"]
except KeyError:
bcs = "outflow"
# create a grid
self.gr = FVGrid(nx, ng=2, bcs=bcs)
def cons_to_prim(self, U):
q = self.gr.scratch_array(nc=NVAR)
gamma = self.params['gamma']
q[:, QRHO] = U[:, URHO]
q[:, QU] = U[:, UMX]/U[:, URHO]
q[:, QP] = (U[:, UENER] - 0.5*q[:, QRHO]*q[:, QU]**2)*(gamma - 1.0)
return q
def flux_update(self, U):
gamma = self.params['gamma']
# convert to primitive
q = self.cons_to_prim(U)
# construct the slopes
dq = self.gr.scratch_array(nc=NVAR)
for n in range(NVAR):
a = q[:, n]
# MC slope
ib = self.gr.ilo-1
ie = self.gr.ihi+1
dc = self.gr.scratch_array()
dl = self.gr.scratch_array()
dr = self.gr.scratch_array()
dc[ib:ie+1] = 0.5*(a[ib+1:ie+2] - a[ib-1:ie ])
dl[ib:ie+1] = a[ib+1:ie+2] - a[ib :ie+1]
dr[ib:ie+1] = a[ib :ie+1] - a[ib-1:ie ]
# these where's do a minmod()
d1 = 2.0*np.where(np.fabs(dl) < np.fabs(dr), dl, dr)
d2 = np.where(np.fabs(dc) < np.fabs(d1), dc, d1)
dq[:, n] = np.where(dl*dr > 0.0, d2, 0.0)
#dq[:, n] = dc
# now make the states
q_l = self.gr.scratch_array(nc=NVAR)
q_l[self.gr.ilo:self.gr.ihi+2, :] = q[self.gr.ilo-1:self.gr.ihi+1, :] + 0.5*dq[self.gr.ilo-1:self.gr.ihi+1, :]
q_r = self.gr.scratch_array(nc=NVAR)
q_r[self.gr.ilo:self.gr.ihi+2, :] = q[self.gr.ilo:self.gr.ihi+2, :] - 0.5*dq[self.gr.ilo:self.gr.ihi+2, :]
# now solve the Riemann problem
flux = self.gr.scratch_array(nc=NVAR)
for i in range(self.gr.ilo, self.gr.ihi+2):
flux[i, :] = riemann.riemann(q_l[i, :], q_r[i, :], gamma)
A = self.gr.scratch_array(nc=NVAR)
for n in range(NVAR):
A[self.gr.ilo:self.gr.ihi+1, n] = (flux[self.gr.ilo:self.gr.ihi+1, n] -
flux[self.gr.ilo+1:self.gr.ihi+2, n])/self.gr.dx
return A
def init_cond(self, U):
idx_l = self.gr.x < 0.5
idx_r = self.gr.x >= 0.5
U[idx_l, URHO] = self.params['rho_l']
U[idx_l, UMX] = self.params['rho_l'] * self.params['u_l']
U[idx_l, UENER] = self.params['p_l']/(self.params['gamma'] - 1.0) + 0.5 * self.params['rho_l'] * self.params['u_l']**2
U[idx_r, URHO] = self.params['rho_r']
U[idx_r, UMX] = self.params['rho_r'] * self.params['u_r']
U[idx_r, UENER] = self.params['p_r']/(self.params['gamma'] - 1.0) + 0.5 * self.params['rho_r'] * self.params['u_r']**2
def timestep(self, U):
# compute the sound speed
q = self.cons_to_prim(U)
c = self.gr.scratch_array()
c[self.gr.ilo:self.gr.ihi+1] = np.sqrt(self.params['gamma'] *
q[self.gr.ilo:self.gr.ihi+1,QP] /
q[self.gr.ilo:self.gr.ihi+1,QRHO])
dt = self.params['cfl'] * self.gr.dx / (np.abs(q[self.gr.ilo:self.gr.ihi+1, QU]) +
c[self.gr.ilo:self.gr.ihi+1]).max()
return dt
def mol_update(self):
U = self.gr.scratch_array(nc=NVAR)
# setup initial conditions
self.init_cond(U)
t = 0.0
tmax = self.params['tmax']
try:
verbose = self.params['verbose']
except KeyError:
verbose = 1
istep = 0
while t < tmax:
# compute the timestep
dt = self.timestep(U)
if t + dt > tmax:
dt = tmax - t
# second-order RK integration
self.gr.fill_BCs(U)
k1 = self.flux_update(U)
U_tmp = self.gr.scratch_array(nc=NVAR)
for n in range(NVAR):
U_tmp[:, n] = U[:, n] + 0.5 * dt * k1[:, n]
self.gr.fill_BCs(U_tmp)
k2 = self.flux_update(U_tmp)
for n in range(NVAR):
U[:, n] += dt * k2[:, n]
t += dt
istep += 1
if verbose:
print(istep, t, dt)
return self.gr, U
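# A scalar sketch (illustrative only) of the MC-limited slope that
# Simulation.flux_update builds above with np.where: keep the centered
# difference unless it exceeds twice the smaller one-sided difference, and
# fall back to zero at a local extremum (sign change between the two sides).
def mc_limited_slope_sketch(a_m1, a_0, a_p1):
    dc = 0.5 * (a_p1 - a_m1)   # centered difference
    dl = a_p1 - a_0            # forward one-sided difference
    dr = a_0 - a_m1            # backward one-sided difference
    d1 = 2.0 * (dl if abs(dl) < abs(dr) else dr)
    d2 = dc if abs(dc) < abs(d1) else d1
    return d2 if dl * dr > 0.0 else 0.0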
if __name__ == "__main__":
nx = 128
# Sod's problem
params = {
'rho_l': 1.0,
'u_l': 0.0,
'p_l': 1.0,
'rho_r': 0.125,
'u_r': 0.0,
'p_r': 0.1,
'tmax': 0.2,
'gamma': 1.4,
'cfl': 0.8
}
sim = Simulation(nx, params)
gr, U = sim.mol_update()
exact = np.loadtxt("../sod_exact.out")
plt.clf()
plt.plot(gr.x, U[:, 0], "x")
plt.plot(exact[:,0], exact[:,1])
plt.savefig("sod.png", dpi=150)
# double rarefaction
params = {
'rho_l': 1.0,
'u_l': -2.0,
'p_l': 0.4,
'rho_r': 1.0,
'u_r': 2.0,
'p_r': 0.4,
'tmax': 0.15,
'gamma': 1.4,
'cfl': 0.8
}
sim = Simulation(nx, params)
gr, U = sim.mol_update()
exact = np.loadtxt("../double_rarefaction_exact.out")
plt.clf()
plt.plot(gr.x, U[:, 0], "x")
plt.plot(exact[:,0], exact[:,1])
plt.savefig("double_rarefaction.png", dpi=150)
# strong shock
params = {
'rho_l': 1.0,
'u_l': 0.0,
'p_l': 1000.0,
'rho_r': 1.0,
'u_r': 0.0,
'p_r': 0.01,
'tmax': 0.012,
'gamma': 1.4,
'cfl': 0.8
}
sim = Simulation(nx, params)
gr, U = sim.mol_update()
exact = np.loadtxt("../strong_shock_exact.out")
plt.clf()
plt.plot(gr.x, U[:, 0], "x")
plt.plot(exact[:,0], exact[:,1])
plt.savefig("strong_shock.png", dpi=150)
# slow moving shock
params = {
'rho_l': 5.6698,
'u_l': -1.4701,
'p_l': 100.0,
'rho_r': 1.0,
'u_r': -10.5,
'p_r': 1.0,
'tmax': 1.0,
'gamma': 1.4,
'cfl': 0.8
}
sim = Simulation(nx, params)
gr, U = sim.mol_update()
exact = np.loadtxt("../slow_shock_exact.out")
plt.clf()
plt.plot(gr.x, U[:, 0], "x")
plt.plot(exact[:,0], exact[:,1])
plt.savefig("slow_shock.png", dpi=150)
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/graphics/tests/test_functional.py | 30 | 2816 | from statsmodels.compat.python import range
import numpy as np
from numpy.testing import dec, assert_equal, assert_almost_equal
from statsmodels.graphics.functional import \
banddepth, fboxplot, rainbowplot
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def test_banddepth_BD2():
xx = np.arange(500) / 150.
y1 = 1 + 0.5 * np.sin(xx)
y2 = 0.3 + np.sin(xx + np.pi/6)
y3 = -0.5 + np.sin(xx + np.pi/6)
y4 = -1 + 0.3 * np.cos(xx + np.pi/6)
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='BD2')
expected_depth = [0.5, 5./6, 5./6, 0.5]
assert_almost_equal(depth, expected_depth)
## Plot to visualize why we expect this output
#fig = plt.figure()
#ax = fig.add_subplot(111)
#for ii, yy in enumerate([y1, y2, y3, y4]):
# ax.plot(xx, yy, label="y%s" % ii)
#ax.legend()
#plt.show()
def test_banddepth_MBD():
xx = np.arange(5001) / 5000.
y1 = np.zeros(xx.shape)
y2 = 2 * xx - 1
y3 = np.ones(xx.shape) * 0.5
y4 = np.ones(xx.shape) * -0.25
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='MBD')
expected_depth = [5./6, (2*(0.75-3./8)+3)/6, 3.5/6, (2*3./8+3)/6]
assert_almost_equal(depth, expected_depth, decimal=4)
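# A minimal sketch (synthetic curves) of calling banddepth directly: larger
# depth values mark central curves, the smallest values mark likely outliers.
def _banddepth_usage_sketch():
    xx = np.linspace(0, 2 * np.pi, 100)
    curves = np.asarray([np.sin(xx + shift) for shift in (0.0, 0.1, 0.2, 2.0)])
    return banddepth(curves, method='MBD')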
@dec.skipif(not have_matplotlib)
def test_fboxplot_rainbowplot():
# Test fboxplot and rainbowplot together, is much faster.
def harmfunc(t):
"""Test function, combination of a few harmonic terms."""
        # Constant: 0 with p=0.9, 1 with p=0.1 - for creating outliers
ci = int(np.random.random() > 0.9)
a1i = np.random.random() * 0.05
a2i = np.random.random() * 0.05
b1i = (0.15 - 0.1) * np.random.random() + 0.1
b2i = (0.15 - 0.1) * np.random.random() + 0.1
func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \
ci * (b1i * np.sin(t) + b2i * np.cos(t))
return func
np.random.seed(1234567)
# Some basic test data, Model 6 from Sun and Genton.
t = np.linspace(0, 2 * np.pi, 250)
data = []
for ii in range(20):
data.append(harmfunc(t))
# fboxplot test
fig = plt.figure()
ax = fig.add_subplot(111)
_, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax)
ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2,
12, 17, 11, 14, 10, 0, 18])
assert_equal(ix_depth, ix_expected)
ix_expected2 = np.array([2, 11, 17, 18])
assert_equal(ix_outliers, ix_expected2)
plt.close(fig)
# rainbowplot test (re-uses depth variable)
xdata = np.arange(data[0].size)
fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow)
plt.close(fig)
| bsd-3-clause |
interrogator/corpkit-app | gui.py | 1 | 316378 | #!/usr/bin/env python
"""
# corpkit GUI
# Daniel McDonald
# This file conains the frontend side of the corpkit gui.
# You can use py2app or pyinstaller on it to make a .app,
# or just run it as a script.
# Below is a string that is used to determine when minor
# updates are available on github for automatic download:
# <updated>DATE-REPLACE</updated>
# Tabbed notebook template created by:
# Patrick T. Cossette <[email protected]>
"""
from __future__ import print_function
import sys
# is string used?
import string
import time
import os
# is threading used?
import threading
try:
import tkMessageBox as messagebox
import tkSimpleDialog as simpledialog
import tkFileDialog as filedialog
except ImportError:
import tkinter.messagebox
import tkinter.filedialog
import tkinter.simpledialog
try:
import Tkinter as tkinter
from Tkinter import *
from ttk import Progressbar, Style
from Tkinter import _setit
except ImportError:
import tkinter
from tkinter import *
from tkinter.ttk import Progressbar, Style
from tkinter import _setit
# todo: delete from the rest of code
from corpkit.corpus import Corpus
# determine path to gui resources:
py_script = False
from_py = False
rd = sys.argv[0]
if sys.platform == 'darwin':
key = 'Mod1'
fext = 'app'
if '.app' in rd:
rd = os.path.join(rd.split('.app', 1)[0] + '.app', 'Contents', 'MacOS')
else:
import corpkit
rd = os.path.dirname(corpkit.__file__)
from_py = True
else:
key = 'Control'
fext = 'exe'
if '.py' in rd:
py_script = True
rd = os.path.dirname(os.path.join(rd.split('.py', 1)[0]))
########################################################################
class SplashScreen(object):
"""
A simple splash screen to display before corpkit is loaded.
"""
def __init__(self, tkRoot, imageFilename, minSplashTime=0):
import os
# if there is some PIL issue, just don't show GUI
# todo: this would also need to disable display of previous figures
self._can_operate = True
try:
from PIL import Image
from PIL import ImageTk
except ImportError:
self._can_operate = False
return
self._root = tkRoot
fname = os.path.join(rd, imageFilename)
if os.path.isfile(fname):
self._image = ImageTk.PhotoImage(file=fname)
self._splash = None
self._minSplashTime = time.time() + minSplashTime
else:
self._image = False
def __enter__(self):
# Remove the app window from the display
#self._root.withdraw( )
if not self._can_operate:
return
if not self._image:
return
# Calculate the geometry to center the splash image
scrnWt = self._root.winfo_screenwidth()
scrnHt = self._root.winfo_screenheight()
imgWt = self._image.width()
imgHt = self._image.height()
imgXPos = (scrnWt / 2) - (imgWt / 2)
imgYPos = (scrnHt / 2) - (imgHt / 2)
# Create the splash screen
self._splash = Toplevel()
self._splash.overrideredirect(1)
self._splash.geometry('+%d+%d' % (imgXPos, imgYPos))
background_label = Label(self._splash, image=self._image)
background_label.grid(row=1, column=1, sticky=W)
# this code shows the version number, but it's ugly.
#import corpkit
#oldstver = str(corpkit.__version__)
#txt = 'Loading corpkit v%s ...' % oldstver
#cnv = Canvas(self._splash, width=200, height=20)
#cnv.create_text((100, 14), text=txt, font=("Helvetica", 14, "bold"))
#cnv.grid(row=1, column=1, sticky='SW', padx=20, pady=20)
self._splash.lift()
self._splash.update( )
def __exit__(self, exc_type, exc_value, traceback ):
# Make sure the minimum splash time has elapsed
if not self._can_operate:
return
if not self._image:
return
timeNow = time.time()
if timeNow < self._minSplashTime:
time.sleep( self._minSplashTime - timeNow )
# Destroy the splash window
self._splash.destroy( )
# Display the application window
#self._root.deiconify( )
class RedirectText(object):
"""Send text to app from stdout, for the log and the status bar"""
def __init__(self, text_ctrl, log_text, text_widget):
"""Constructor"""
def dumfun():
"""to satisfy ipython, sys, which look for a flush method"""
pass
self.output = text_ctrl
self.log = log_text
self.flush = dumfun
self.fileno = dumfun
self.text_widget = text_widget
def write(self, string):
"""Add stdout and stderr to log and/or to console"""
import re
# don't show blank lines
show_reg = re.compile(r'^\s*$')
        # delete local abs paths from traceback
del_reg = re.compile(r'^/*(Users|usr).*/(site-packages|corpkit/corpkit/)')
if 'Parsing file' not in string and 'Initialising parser' not in string \
and not 'Interrogating subcorpus' in string:
if not re.match(show_reg, string):
string = re.sub(del_reg, '', string)
self.log.append(string.rstrip('\n'))
self.text_widget.config(state='normal')
self.text_widget.delete(1.0, 'end')
self.text_widget.insert('end', string.rstrip('\n'))
self.text_widget.config(state='disabled')
if not re.match(show_reg, string):
if not string.lstrip().startswith('#') and not string.lstrip().startswith('import'):
string = re.sub(del_reg, '', string).rstrip('\n').rstrip()
string = string.split('\n')[-1]
self.output.set(string.lstrip().rstrip('\n').rstrip())
self.text_widget.config(state='normal')
self.text_widget.delete(1.0, 'end')
self.text_widget.insert('end', string.lstrip().rstrip('\n').rstrip())
self.text_widget.config(state='disabled')
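# A minimal sketch of the redirection idea RedirectText implements: any object
# exposing write() and flush() can stand in for sys.stdout, so print() output
# can be captured and forwarded to a log or a widget.
def _stdout_redirect_sketch(log_list):
    import sys
    class _Writer(object):
        def write(self, string):
            log_list.append(string)
        def flush(self):
            pass
    old_stdout = sys.stdout
    sys.stdout = _Writer()
    try:
        print('captured line')
    finally:
        sys.stdout = old_stdout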
class Label2(Frame):
"""a label whose size can be specified in pixels"""
def __init__(self, master, width=0, height=0, **kwargs):
self.width = width
self.height = height
Frame.__init__(self, master, width=self.width, height=self.height)
self.label_widget = Text(self, width=1, **kwargs)
self.label_widget.pack(fill='both', expand=True)
#self.label_widget.config(state=DISABLED)
def pack(self, *args, **kwargs):
Frame.pack(self, *args, **kwargs)
self.pack_propagate(False)
def grid(self, *args, **kwargs):
Frame.grid(self, *args, **kwargs)
self.grid_propagate(False)
class HyperlinkManager:
"""Hyperlinking for About"""
def __init__(self, text):
self.text=text
self.text.tag_config("hyper", foreground="blue", underline=1)
self.text.tag_bind("hyper", "<Enter>", self._enter)
self.text.tag_bind("hyper", "<Leave>", self._leave)
self.text.tag_bind("hyper", "<Button-1>", self._click)
self.reset()
def reset(self):
self.links = {}
def add(self, action):
# add an action to the manager. returns tags to use in
# associated text widget
tag = "hyper-%d" % len(self.links)
self.links[tag] = action
return "hyper", tag
def _enter(self, event):
self.text.config(cursor="hand2")
def _leave(self, event):
self.text.config(cursor="")
def _click(self, event):
for tag in self.text.tag_names(CURRENT):
if tag[:6] == "hyper-":
self.links[tag]()
return
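# A minimal usage sketch for HyperlinkManager (hypothetical callback and URL):
# the tag tuple returned by add() is passed straight to Text.insert, so the
# inserted span becomes clickable and fires the stored action on Button-1.
def _hyperlink_usage_sketch(text_widget):
    links = HyperlinkManager(text_widget)
    def open_docs():
        import webbrowser
        webbrowser.open('https://example.com/docs')  # hypothetical target
    text_widget.insert(END, 'project documentation', links.add(open_docs))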
class Notebook(Frame):
"""Notebook Widget"""
def __init__(self, parent, activerelief=RAISED, inactiverelief=FLAT,
xpad=4, ypad=6, activefg='black', inactivefg='black', debug=False,
activefc=("Helvetica", 14, "bold"), inactivefc=("Helvetica", 14), **kw):
"""Construct a Notebook Widget
        Notebook(self, parent, activerelief = RAISED, inactiverelief = FLAT,
xpad = 4, ypad = 6, activefg = 'black', inactivefg = 'black', **kw)
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, relief, takefocus, visual, width, activerelief,
inactiverelief, xpad, ypad.
xpad and ypad are values to be used as ipady and ipadx
with the Label widgets that make up the tabs. activefg and inactivefg define what
color the text on the tabs when they are selected, and when they are not
"""
self.activefg = activefg
self.inactivefg = inactivefg
self.activefc = activefc
self.inactivefc = inactivefc
self.deletedTabs = []
self.xpad = xpad
self.ypad = ypad
self.activerelief = activerelief
self.inactiverelief = inactiverelief
self.tabVars = {}
self.tabs = 0
self.progvar = DoubleVar()
self.progvar.set(0)
self.style = Style()
self.style.theme_use("default")
self.style.configure("TProgressbar", thickness=15, foreground='#347DBE', background='#347DBE')
self.kwargs = kw
self.tabVars = {}
self.tabs = 0
# the notebook, with its tabs, middle, status bars
self.noteBookFrame = Frame(parent, bg='#c5c5c5')
self.BFrame = Frame(self.noteBookFrame, bg='#c5c5c5')
self.statusbar = Frame(self.noteBookFrame, bd=2, height=24, width=kw.get('width'), bg='#F4F4F4')
self.noteBook = Frame(self.noteBookFrame, relief=RAISED, bd=2, **kw)
self.noteBook.grid_propagate(0)
# status bar text and log
self.status_text=StringVar()
self.log_stream = []
#self.progspace = Frame(self.statusbar, width=int(kw.get('width') * 0.4))
#self.progspace.grid(sticky=E)
#self.statusbar.grid_columnconfigure(2, weight=5)
self.text = Label2(self.statusbar, #textvariable=self.status_text,
width=int(kw.get('width') * 0.65), height=24, font=("Courier New", 13))
self.progbar = Progressbar(self.statusbar, orient='horizontal',
length=int(kw.get('width') * 0.35),
mode='determinate', variable=self.progvar,
style="TProgressbar")
#self.statusbar.grid_columnconfigure(1, weight=2)
self.statusbar.grid(row=2, column=0)
#self.progbar.pack(anchor=E, fill='x')
self.text.pack(side=LEFT)
self.progbar.pack(side=RIGHT, expand=True)
#self.statusbar.grid_propagate()
# redirect stdout for log
self.redir = RedirectText(self.status_text, self.log_stream, self.text.label_widget)
if not debug:
sys.stdout = self.redir
sys.stderr = self.redir
Frame.__init__(self)
self.noteBookFrame.grid()
self.BFrame.grid(row=0, column=0, columnspan=27, sticky=N) # ", column=13)" puts the tabs in the middle!
self.noteBook.grid(row=1, column=0, columnspan=27)
#self.progbarspace.grid(row=2, column=0, padx=(273, 0), sticky=E)
def change_tab(self, IDNum):
"""Internal Function"""
for i in (a for a in range(0, len(list(self.tabVars.keys())))):
if not i in self.deletedTabs:
if i != IDNum:
self.tabVars[i][1].grid_remove()
self.tabVars[i][0]['relief'] = self.inactiverelief
self.tabVars[i][0]['fg'] = self.inactivefg
self.tabVars[i][0]['font'] = self.inactivefc
self.tabVars[i][0]['bg'] = '#c5c5c5'
else:
self.tabVars[i][1].grid()
self.tabVars[IDNum][0]['relief'] = self.activerelief
self.tabVars[i][0]['fg'] = self.activefg
self.tabVars[i][0]['font'] = self.activefc
self.tabVars[i][0]['bg'] = 'white'
def add_tab(self, width=2, **kw):
        """Creates a new tab, and returns its corresponding frame
        """
        import tkinter
temp = self.tabs
self.tabVars[self.tabs] = [Label(self.BFrame, relief = RIDGE, **kw)]
self.tabVars[self.tabs][0].bind("<Button-1>", lambda Event:self.change_tab(temp))
self.tabVars[self.tabs][0].pack(side = LEFT, ipady=self.ypad, ipadx=self.xpad)
self.tabVars[self.tabs].append(Frame(self.noteBook, **self.kwargs))
self.tabVars[self.tabs][1].grid(row=0, column=0)
self.change_tab(0)
self.tabs += 1
return self.tabVars[temp][1]
def destroy_tab(self, tab):
"""Delete a tab from the notebook, as well as it's corresponding frame
"""
self.iteratedTabs = 0
for b in list(self.tabVars.values()):
if b[1] == tab:
b[0].destroy()
self.tabs -= 1
self.deletedTabs.append(self.iteratedTabs)
break
self.iteratedTabs += 1
def focus_on(self, tab):
"""Locate the IDNum of the given tab and use
change_tab to give it focus
"""
self.iteratedTabs = 0
for b in list(self.tabVars.values()):
if b[1] == tab:
self.change_tab(self.iteratedTabs)
break
self.iteratedTabs += 1
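# A minimal usage sketch for the Notebook widget defined above (standalone
# window, hypothetical tab names). debug=True leaves sys.stdout untouched;
# add_tab() returns the frame that the caller then populates with widgets.
def _notebook_usage_sketch():
    demo_root = Tk()
    demo_note = Notebook(demo_root, width=600, height=400,
                         activefg='#000000', inactivefg='#585555', debug=True)
    demo_note.grid()
    first_tab = demo_note.add_tab(text='First')
    Label(first_tab, text='Widgets for this tab go here').grid()
    demo_note.add_tab(text='Second')
    return demo_root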
def corpkit_gui(noupdate=False, loadcurrent=False, debug=False):
"""
The actual code for the application
:param noupdate: prevent auto update checking
:type noupdate: bool
:param loadcurrent: load this path as the project
:type loadcurrent: str
"""
# make app
root=Tk()
#minimise it
root.withdraw( )
# generate splash
with SplashScreen(root, 'loading_image.png', 1.0):
# set app size
#root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
import warnings
warnings.filterwarnings("ignore")
import traceback
import dateutil
import sys
import os
import corpkit
from corpkit.process import get_gui_resource_dir, get_fullpath_to_jars
from tkintertable import TableCanvas, TableModel
from nltk.draw.table import MultiListbox, Table
from collections import OrderedDict
from pandas import Series, DataFrame
# stop warning when insecure download is performed
# this somehow raised an attribute error for anrej,
# so we'll allow it to pass ...
import requests
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
pass
import locale
if sys.platform == 'win32':
try:
locale.setlocale(locale.LC_ALL, 'english-usa')
except:
pass
else:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
# unused in the gui, dummy imports for pyinstaller
#import seaborn
from hashlib import md5
import chardet
import pyparsing
# a try statement in case not bundling scipy, which
# tends to bloat the .app
try:
from scipy.stats import linregress
except:
pass
# compress some things for a small screen ...
small_screen = root.winfo_screenheight() < 800
#small_screen = True
## add tregex and some other bits to path
paths = ['', 'dictionaries', 'corpkit', 'nltk_data']
for p in paths:
fullp = os.path.join(rd, p).rstrip('/')
if not fullp in sys.path:
sys.path.append(fullp)
# add nltk data to path
import nltk
nltk_data_path = os.path.join(rd, 'nltk_data')
if nltk_data_path not in nltk.data.path:
nltk.data.path.append(os.path.join(rd, 'nltk_data'))
# not sure if needed anymore: more path setting
corpath = os.path.dirname(corpkit.__file__)
baspat = os.path.dirname(os.path.dirname(corpkit.__file__))
dicpath = os.path.join(baspat, 'dictionaries')
os.environ["PATH"] += os.pathsep + corpath + os.pathsep + dicpath
sys.path.append(corpath)
sys.path.append(dicpath)
sys.path.append(baspat)
root.title("corpkit")
root.imagewatched = StringVar()
#root.overrideredirect(True)
root.resizable(FALSE,FALSE)
note_height = 600 if small_screen else 660
note_width = root.winfo_screenwidth()
if note_width > note_height * 1.62:
note_width = note_height * 1.62
note_width = int(note_width)
note = Notebook(root, width=note_width, height=note_height,
activefg='#000000', inactivefg='#585555', debug=debug) #Create a Note book Instance
note.grid()
tab0 = note.add_tab(text="Build")
tab1 = note.add_tab(text="Interrogate")
tab2 = note.add_tab(text="Edit")
tab3 = note.add_tab(text="Visualise")
tab4 = note.add_tab(text="Concordance")
note.text.update_idletasks()
################### ################### ################### ###################
# VARIABLES # # VARIABLES # # VARIABLES # # VARIABLES #
################### ################### ################### ###################
# in this section, some recurring, empty variables are defined
# to do: compress most of the dicts into one
# round up text so we can bind keys to them later
all_text_widgets = []
# for the build tab (could be cleaned up)
chosen_f = []
sentdict = {}
boxes = []
buildbits = {}
most_recent_projects = []
# some variables that will get used throughout the gui
# a dict of the editor frame names and models
editor_tables = {}
currently_in_each_frame = {}
# for conc sort toggle
sort_direction = True
subc_sel_vals = []
subc_sel_vals_build = []
# store every interrogation and conc in this session
all_interrogations = OrderedDict()
all_conc = OrderedDict()
all_images = []
all_interrogations['None'] = 'None'
# corpus path setter
corpus_fullpath = StringVar()
corpus_fullpath.set('')
corenlppath = StringVar()
corenlppath.set(os.path.join(os.path.expanduser("~"), 'corenlp'))
# visualise
# where to put the current figure and frame
thefig = []
oldplotframe = []
# for visualise, this holds a list of subcorpora or entries,
# so that the title will dynamically change at the right time
single_entry_or_subcorpus = {}
# conc
# to do: more consistent use of globals!
itemcoldict = {}
current_conc = ['None']
global conc_saved
conc_saved = False
import itertools
try:
toggle = itertools.cycle([True, False]).__next__
except AttributeError:
toggle = itertools.cycle([True, False]).next
# manage pane: obsolete
manage_box = {}
# custom lists
custom_special_dict = {}
# just the ones on the hd
saved_special_dict = {}
# not currently using this sort feature---should use in conc though
import itertools
try:
direct = itertools.cycle([0,1]).__next__
except AttributeError:
direct = itertools.cycle([0,1]).next
corpus_names_and_speakers = {}
################### ################### ################### ###################
# DICTIONARIES # # DICTIONARIES # # DICTIONARIES # # DICTIONARIES #
################### ################### ################### ###################
qd = {'Subjects': r'__ >># @NP',
'Processes': r'/VB.?/ >># ( VP >+(VP) (VP !> VP $ NP))',
'Modals': r'MD < __',
'Participants': r'/(NN|PRP|JJ).?/ >># (/(NP|ADJP)/ $ VP | > VP)',
'Entities': r'NP <# NNP',
'Any': 'any'}
# concordance colours
colourdict = {1: '#fbb4ae',
2: '#b3cde3',
3: '#ccebc5',
4: '#decbe4',
5: '#fed9a6',
6: '#ffffcc',
7: '#e5d8bd',
8: '#D9DDDB',
9: '#000000',
0: '#F4F4F4'}
# translate search option for interrogator()
transdict = {
'Get distance from root for regex match': 'a',
'Get tag and word of match': 'b',
'Count matches': 'c',
'Get role of match': 'f',
'Get "role:dependent", matching governor': 'd',
'Get ngrams from tokens': 'j',
'Get "role:governor", matching dependent': 'g',
'Get lemmata matching regex': 'l',
'Get tokens by role': 'm',
'Get ngrams from trees': 'n',
'Get part-of-speech tag': 'p',
'Regular expression search': 'r',
'Get tokens matching regex': 't',
'Get stats': 'v',
'Get words': 'w',
'Get tokens by regex': 'h',
'Get tokens matching list': 'e'}
# translate sort_by for editor
sort_trans = {'None': False,
'Total': 'total',
'Inverse total': 'infreq',
'Name': 'name',
'Increase': 'increase',
'Decrease': 'decrease',
'Static': 'static',
'Turbulent': 'turbulent',
'P value': 'p',
'Reverse': 'reverse'}
# translate special queries for interrogator()
spec_quer_translate = {'Participants': 'w',
'Any': 'any',
'Processes': 'w',
'Subjects': 'w',
'Entities': 'w'}
# todo: newer method
from corpkit.constants import transshow, transobjs, LETTERS
from corpkit.process import make_name_to_query_dict
exist = {'Trees': 't', 'Stats': 'v', 'CQL': 'cql'}
convert_name_to_query = make_name_to_query_dict(exist)
# these are example queries for each data type
def_queries = {}
for i in convert_name_to_query.keys():
if i.lower().endswith('function'):
def_queries[i] = r'\b(amod|nn|advm|vmod|tmod)\b'
elif i.lower().endswith('lemma'):
def_queries[i] = r'\b(want|desire|need)\b'
elif i.lower().endswith('word class'):
def_queries[i] = r'^(ad)verb$'
elif i.lower().endswith('index'):
def_queries[i] = r'[012345]',
elif i.lower().endswith('stats'):
def_queries[i] = r'any',
elif i.lower().endswith('cql'):
def_queries[i] = r'[pos="RB" & word=".*ly$"]',
elif i.lower().endswith('pos'):
def_queries[i] = r'^[NJR]',
elif i.lower().endswith('index'):
def_queries[i] = r'[012345]',
elif i.lower().endswith('distance from root'):
def_queries[i] = r'[012345]',
elif i.lower().endswith('trees'):
def_queries[i] = r'JJ > (NP <<# /NN.?/)'
else:
def_queries[i] = r'\b(m.n|wom.n|child(ren)?)\b'
################### ################### ################### ###################
# FUNCTIONS # # FUNCTIONS # # FUNCTIONS # # FUNCTIONS #
################### ################### ################### ###################
# some functions used throughout the gui
def focus_next_window(event):
"""tab to next widget"""
event.widget.tk_focusNext().focus()
try:
event.widget.tk_focusNext().selection_range(0, END)
except:
pass
return "break"
def runner(button, command, conc=False):
"""
Runs the command of a button, disabling the button till it is done,
whether it returns early or not
"""
try:
if button == interrobut or button == interrobut_conc:
command(conc)
else:
command()
except Exception as err:
import traceback
print(traceback.format_exc())
note.progvar.set(0)
button.config(state=NORMAL)
def refresh_images(*args):
"""get list of images saved in images folder"""
import os
if os.path.isdir(image_fullpath.get()):
image_list = sorted([f for f in os.listdir(image_fullpath.get()) if f.endswith('.png')])
for iname in image_list:
if iname.replace('.png', '') not in all_images:
all_images.append(iname.replace('.png', ''))
else:
for i in all_images:
all_images.pop(i)
#refresh()
# if the dummy variable imagewatched is changed, refresh images
# this connects to matplotlib's save button, if the modified
# matplotlib is installed. a better way to do this would be good!
root.imagewatched.trace("w", refresh_images)
def timestring(input):
"""print with time prepended"""
from time import localtime, strftime
thetime = strftime("%H:%M:%S", localtime())
print('%s: %s' % (thetime, input.lstrip()))
def conmap(cnfg, section):
"""helper for load settings"""
dict1 = {}
options = cnfg.options(section)
# todo: this loops over too many times
for option in options:
#try:
opt = cnfg.get(section, option)
if opt == '0':
opt = False
elif opt == '1':
opt = True
elif opt.isdigit():
opt = int(opt)
if isinstance(opt, str) and opt.lower() == 'none':
opt = False
if not opt:
opt = 0
dict1[option] = opt
return dict1
def convert_pandas_dict_to_ints(dict_obj):
"""try to turn pandas as_dict into ints, for tkintertable
the huge try statement is to stop errors when there
is a single corpus --- need to find source of problem
earlier, though"""
vals = []
try:
for a, b in list(dict_obj.items()):
# c = year, d = count
for c, d in list(b.items()):
vals.append(d)
if all([float(x).is_integer() for x in vals if is_number(x)]):
for a, b in list(dict_obj.items()):
for c, d in list(b.items()):
if is_number(d):
b[c] = int(d)
except TypeError:
pass
return dict_obj
def update_spreadsheet(frame_to_update, df_to_show=None, model=False,
height=140, width=False, indexwidth=70):
"""refresh a spreadsheet"""
from collections import OrderedDict
import pandas
# colours for tkintertable
kwarg = {'cellbackgr': '#F7F7FA',
'grid_color': '#c5c5c5',
'entrybackgr': '#F4F4F4',
'selectedcolor': 'white',
'rowselectedcolor': '#b3cde3',
'multipleselectioncolor': '#fbb4ae'}
if width:
kwarg['width'] = width
if model and not df_to_show:
df_to_show = make_df_from_model(model)
#if need_make_totals:
df_to_show = make_df_totals(df_to_show)
if df_to_show is not None:
# for abs freq, make total
model = TableModel()
df_to_show = pandas.DataFrame(df_to_show, dtype=object)
#if need_make_totals(df_to_show):
df_to_show = make_df_totals(df_to_show)
# turn pandas into dict
raw_data = df_to_show.to_dict()
# convert to int if possible
raw_data = convert_pandas_dict_to_ints(raw_data)
table = TableCanvas(frame_to_update, model=model,
showkeynamesinheader=True,
height=height,
rowheaderwidth=row_label_width.get(), cellwidth=cell_width.get(), **kwarg)
table.createTableFrame()
model = table.model
model.importDict(raw_data)
# move columns into correct positions
for index, name in enumerate(list(df_to_show.index)):
model.moveColumn(model.getColumnIndex(name), index)
table.createTableFrame()
# sort the rows
if 'tkintertable-order' in list(df_to_show.index):
table.sortTable(columnName = 'tkintertable-order')
ind = model.columnNames.index('tkintertable-order')
try:
model.deleteColumn(ind)
except:
pass
if 'Total' in list(df_to_show.index):
table.sortTable(columnName='Total', reverse=True)
elif len(df_to_show.index) == 1:
table.sortTable(columnIndex=0, reverse=True)
else:
#nm = os.path.basename(corpus_fullpath.get().rstrip('/'))
ind = len(df_to_show.columns) - 1
table.sortTable(columnIndex = ind, reverse = 1)
#pass
table.redrawTable()
editor_tables[frame_to_update] = model
currently_in_each_frame[frame_to_update] = df_to_show
return
if model:
table = TableCanvas(frame_to_update, model=model,
showkeynamesinheader=True,
height=height,
rowheaderwidth=row_label_width.get(), cellwidth=cell_width.get(),
**kwarg)
table.createTableFrame()
try:
table.sortTable(columnName = 'Total', reverse = direct())
except:
direct()
table.sortTable(reverse = direct())
table.createTableFrame()
table.redrawTable()
else:
table = TableCanvas(frame_to_update, height=height, cellwidth=cell_width.get(),
showkeynamesinheader=True, rowheaderwidth=row_label_width.get(), **kwarg)
table.createTableFrame() # sorts by total freq, ok for now
table.redrawTable()
from corpkit.cql import remake_special
def ignore():
"""turn this on when buttons should do nothing"""
return "break"
def need_make_totals(df):
"""check if a df needs totals"""
if len(list(df.index)) < 3:
return False
try:
x = df.iloc[0,0]
except:
return False
# if was_series, basically
try:
vals = [i for i in list(df.iloc[0,].values) if is_number(i)]
except TypeError:
return False
if len(vals) == 0:
return False
if all([float(x).is_integer() for x in vals]):
return True
else:
return False
def make_df_totals(df):
"""make totals for a dataframe"""
df = df.drop('Total', errors = 'ignore')
# add new totals
df.ix['Total'] = df.drop('tkintertable-order', errors = 'ignore').sum().astype(object)
return df
def make_df_from_model(model):
"""generate df from spreadsheet"""
import pandas
from io import StringIO
recs = model.getAllCells()
colnames = model.columnNames
collabels = model.columnlabels
row = []
csv_data = []
for c in colnames:
row.append(collabels[c])
try:
csv_data.append(','.join([str(s, errors = 'ignore') for s in row]))
except TypeError:
csv_data.append(','.join([str(s) for s in row]))
#csv_data.append('\n')
for row in list(recs.keys()):
rowname = model.getRecName(row)
try:
csv_data.append(','.join([str(rowname, errors = 'ignore')] + [str(s, errors = 'ignore') for s in recs[row]]))
except TypeError:
csv_data.append(','.join([str(rowname)] + [str(s) for s in recs[row]]))
#csv_data.append('\n')
#writer.writerow(recs[row])
csv = '\n'.join(csv_data)
uc = unicode(csv, errors='ignore')
newdata = pandas.read_csv(StringIO(uc), index_col=0, header=0)
newdata = pandas.DataFrame(newdata, dtype=object)
newdata = newdata.T
newdata = newdata.drop('Total', errors='ignore')
newdata = add_tkt_index(newdata)
if need_make_totals(newdata):
newdata = make_df_totals(newdata)
return newdata
def color_saved(lb, savepath=False, colour1='#D9DDDB', colour2='white',
ext='.p', lists=False):
"""make saved items in listbox have colour background
lb: listbox to colour
savepath: where to look for existing files
            colour1, colour2: what to colour found and not found items
ext: what to append to filenames when searching for them
lists: if working with wordlists, things need to be done differently, more colours"""
all_items = [lb.get(i) for i in range(len(lb.get(0, END)))]
# define colours for permanent lists in wordlists
if lists:
colour3 = '#ffffcc'
colour4 = '#fed9a6'
for index, item in enumerate(all_items):
# check if saved
if not lists:
# files or directories with current corpus in name or without
newn = current_corpus.get() + '-' + urlify(item) + ext
a = os.path.isfile(os.path.join(savepath, urlify(item) + ext))
b = os.path.isdir(os.path.join(savepath, urlify(item)))
c = os.path.isfile(os.path.join(savepath, newn))
d = os.path.isdir(os.path.join(savepath, newn))
if any(x for x in [a, b, c, d]):
issaved = True
else:
issaved = False
# for lists, check if permanently stored
else:
issaved = False
if item in list(saved_special_dict.keys()):
issaved = True
if current_corpus.get() + '-' + item in list(saved_special_dict.keys()):
issaved = True
if issaved:
lb.itemconfig(index, {'bg':colour1})
else:
lb.itemconfig(index, {'bg':colour2})
if lists:
if item in list(predict.keys()):
if item.endswith('_ROLE'):
lb.itemconfig(index, {'bg':colour3})
else:
lb.itemconfig(index, {'bg':colour4})
lb.selection_clear(0, END)
def paste_into_textwidget(*args):
"""paste function for widgets ... doesn't seem to work as expected"""
try:
start = args[0].widget.index("sel.first")
end = args[0].widget.index("sel.last")
args[0].widget.delete(start, end)
except TclError as e:
# nothing was selected, so paste doesn't need
# to delete anything
pass
# for some reason, this works with the error.
try:
args[0].widget.insert("insert", clipboard.rstrip('\n'))
except NameError:
pass
def copy_from_textwidget(*args):
"""more commands for textwidgets"""
#args[0].widget.clipboard_clear()
text=args[0].widget.get("sel.first", "sel.last").rstrip('\n')
args[0].widget.clipboard_append(text)
def cut_from_textwidget(*args):
"""more commands for textwidgets"""
text=args[0].widget.get("sel.first", "sel.last")
args[0].widget.clipboard_append(text)
args[0].widget.delete("sel.first", "sel.last")
def select_all_text(*args):
"""more commands for textwidgets"""
try:
args[0].widget.selection_range(0, END)
except:
args[0].widget.tag_add("sel","1.0","end")
def make_corpus_name_from_abs(pfp, cfp):
if pfp in cfp:
return cfp.replace(pfp.rstrip('/') + '/', '')
else:
return cfp
def get_all_corpora():
import os
all_corpora = []
for root, ds, fs in os.walk(corpora_fullpath.get()):
for d in ds:
path = os.path.join(root, d)
relpath = path.replace(corpora_fullpath.get(), '', 1).lstrip('/')
all_corpora.append(relpath)
return sorted(all_corpora)
def update_available_corpora(delete=False):
"""updates corpora in project, and returns a list of them"""
import os
fp = corpora_fullpath.get()
all_corpora = get_all_corpora()
for om in [available_corpora, available_corpora_build]:
om.config(state=NORMAL)
om['menu'].delete(0, 'end')
if not delete:
for corp in all_corpora:
if not corp.endswith('parsed') and not corp.endswith('tokenised') and om == available_corpora:
continue
om['menu'].add_command(label=corp, command=_setit(current_corpus, corp))
return all_corpora
def refresh():
"""refreshes the list of dataframes in the editor and plotter panes"""
import os
# Reset name_of_o_ed_spread and delete all old options
# get the latest only after first interrogation
if len(list(all_interrogations.keys())) == 1:
selected_to_edit.set(list(all_interrogations.keys())[-1])
dataframe1s['menu'].delete(0, 'end')
dataframe2s['menu'].delete(0, 'end')
every_interrogation['menu'].delete(0, 'end')
#every_interro_listbox.delete(0, 'end')
#every_image_listbox.delete(0, 'end')
new_choices = []
for interro in list(all_interrogations.keys()):
new_choices.append(interro)
new_choices = tuple(new_choices)
dataframe2s['menu'].add_command(label='Self', command=_setit(data2_pick, 'Self'))
if project_fullpath.get() != '' and project_fullpath.get() != rd:
dpath = os.path.join(project_fullpath.get(), 'dictionaries')
if os.path.isdir(dpath):
dicts = sorted([f.replace('.p', '') for f in os.listdir(dpath) if os.path.isfile(os.path.join(dpath, f)) and f.endswith('.p')])
for d in dicts:
dataframe2s['menu'].add_command(label=d, command=_setit(data2_pick, d))
for choice in new_choices:
dataframe1s['menu'].add_command(label=choice, command=_setit(selected_to_edit, choice))
dataframe2s['menu'].add_command(label=choice, command=_setit(data2_pick, choice))
every_interrogation['menu'].add_command(label=choice, command=_setit(data_to_plot, choice))
refresh_images()
# refresh
prev_conc_listbox.delete(0, 'end')
for i in sorted(all_conc.keys()):
prev_conc_listbox.insert(END, i)
def add_tkt_index(df):
"""add order to df for tkintertable"""
import pandas
df = df.T
df = df.drop('tkintertable-order', errors = 'ignore', axis=1)
df['tkintertable-order'] = pandas.Series([index for index, data in enumerate(list(df.index))], index = list(df.index))
df = df.T
return df
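# Sketch of what add_tkt_index does: given a results DataFrame, it appends a
# 'tkintertable-order' row whose values (0, 1, 2, ...) record each column's
# original position, so tkintertable can keep the column order stable when
# the table is displayed.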
def namer(name_box_text, type_of_data = 'interrogation'):
"""returns a name to store interrogation/editor result as"""
if name_box_text.lower() == 'untitled' or name_box_text == '':
c = 0
the_name = '%s-%s' % (type_of_data, str(c).zfill(2))
while any(x.startswith(the_name) for x in list(all_interrogations.keys())):
c += 1
the_name = '%s-%s' % (type_of_data, str(c).zfill(2))
else:
the_name = name_box_text
return the_name
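# Example of the naming scheme (assuming no stored interrogations yet):
#   namer('untitled')        # -> 'interrogation-00'
#   namer('', 'edited')      # -> 'edited-00'
#   namer('my search')       # -> 'my search' (other names pass through as-is)
# The counter keeps incrementing until no existing name starts with the result.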
def show_prev():
"""show previous interrogation"""
import pandas
currentname = name_of_interro_spreadsheet.get()
# get index of current index
if not currentname:
prev.configure(state=DISABLED)
return
ind = list(all_interrogations.keys()).index(currentname)
# if it's higher than zero
if ind > 0:
if ind == 1:
prev.configure(state=DISABLED)
nex.configure(state=NORMAL)
else:
if ind + 1 < len(list(all_interrogations.keys())):
nex.configure(state=NORMAL)
prev.configure(state=NORMAL)
newname = list(all_interrogations.keys())[ind - 1]
newdata = all_interrogations[newname]
name_of_interro_spreadsheet.set(newname)
i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
if isinstance(newdata, pandas.DataFrame):
toshow = newdata
toshowt = newdata.sum()
elif hasattr(newdata, 'results') and newdata.results is not None:
toshow = newdata.results
            if hasattr(newdata, 'totals') and newdata.totals is not None:
                toshowt = pandas.DataFrame(newdata.totals, dtype=object)
            else:
                toshowt = pandas.DataFrame(dtype=object)
update_spreadsheet(interro_results, toshow, height=340)
update_spreadsheet(interro_totals, toshowt, height=10)
refresh()
else:
prev.configure(state=DISABLED)
nex.configure(state=NORMAL)
def show_next():
"""show next interrogation"""
import pandas
currentname = name_of_interro_spreadsheet.get()
if currentname:
ind = list(all_interrogations.keys()).index(currentname)
else:
ind = 0
if ind > 0:
prev.configure(state=NORMAL)
if ind + 1 < len(list(all_interrogations.keys())):
if ind + 2 == len(list(all_interrogations.keys())):
nex.configure(state=DISABLED)
prev.configure(state=NORMAL)
else:
nex.configure(state=NORMAL)
newname = list(all_interrogations.keys())[ind + 1]
newdata = all_interrogations[newname]
name_of_interro_spreadsheet.set(newname)
i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
if isinstance(newdata, pandas.DataFrame):
toshow = newdata
toshowt = newdata.sum()
elif hasattr(newdata, 'results') and newdata.results is not None:
toshow = newdata.results
            if hasattr(newdata, 'totals') and newdata.totals is not None:
                toshowt = newdata.totals
            else:
                toshowt = None
update_spreadsheet(interro_results, toshow, height=340)
        totals_as_df = pandas.DataFrame(toshowt, dtype=object)
        update_spreadsheet(interro_totals, totals_as_df, height=10)
refresh()
else:
nex.configure(state=DISABLED)
prev.configure(state=NORMAL)
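# show_prev()/show_next() step through all_interrogations in stored key order,
# updating the spreadsheets and disabling the Previous/Next buttons when the
# start or end of the list is reached.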
def exchange_interro_branch(namedtupname, newdata, branch='results'):
"""replaces a namedtuple results/totals with newdata
--- such a hack, should upgrade to recordtype"""
namedtup = all_interrogations[namedtupname]
the_branch = getattr(namedtup, branch)
if branch == 'results':
the_branch.drop(the_branch.index, inplace=True)
the_branch.drop(the_branch.columns, axis=1, inplace=True)
for i in list(newdata.columns):
the_branch[i] = i
for index, i in enumerate(list(newdata.index)):
the_branch.loc[i] = newdata.ix[index]
elif branch == 'totals':
the_branch.drop(the_branch.index, inplace=True)
for index, datum in zip(newdata.index, newdata.iloc[:,0].values):
the_branch.set_value(index, datum)
all_interrogations[namedtupname] = namedtup
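# Note: exchange_interro_branch works by emptying and refilling the existing
# results/totals DataFrames in place, since the Interrogation namedtuple
# itself cannot have a new attribute assigned (hence the 'hack' above).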
def update_interrogation(table_id, id, is_total=False):
"""takes any changes made to spreadsheet and saves to the interrogation
id: 0 = interrogator
1 = old editor window
2 = new editor window"""
model=editor_tables[table_id]
newdata = make_df_from_model(model)
if need_make_totals(newdata):
newdata = make_df_totals(newdata)
if id == 0:
name_of_interrogation = name_of_interro_spreadsheet.get()
if id == 1:
name_of_interrogation = name_of_o_ed_spread.get()
if id == 2:
name_of_interrogation = name_of_n_ed_spread.get()
if not is_total:
exchange_interro_branch(name_of_interrogation, newdata, branch='results')
else:
exchange_interro_branch(name_of_interrogation, newdata, branch='totals')
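# In short, update_interrogation pulls the (possibly hand-edited) table out of
# the tkintertable model via make_df_from_model and writes it back into the
# stored interrogation with exchange_interro_branch, so later edits and plots
# see the manual changes.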
def update_all_interrogations(pane='interrogate'):
    """update all_interrogations with data from the spreadsheets
    need a very serious cleanup!"""
    import pandas
# to do: only if they are there!
if pane == 'interrogate':
update_interrogation(interro_results, id=0)
update_interrogation(interro_totals, id=0, is_total=True)
if pane == 'edit':
update_interrogation(o_editor_results, id=1)
update_interrogation(o_editor_totals, id=1, is_total=True)
# update new editor sheet if it's there
if name_of_n_ed_spread.get() != '':
update_interrogation(n_editor_results, id=2)
update_interrogation(n_editor_totals, id=2, is_total=True)
timestring('Updated interrogations with manual data.')
if pane == 'interrogate':
the_data = all_interrogations[name_of_interro_spreadsheet.get()]
tot = pandas.DataFrame(the_data.totals, dtype=object)
if the_data.results is not None:
update_spreadsheet(interro_results, the_data.results, height=340)
else:
update_spreadsheet(interro_results, df_to_show=None, height=340)
update_spreadsheet(interro_totals, tot, height=10)
if pane == 'edit':
the_data = all_interrogations[name_of_o_ed_spread.get()]
there_is_new_data = False
try:
newdata = all_interrogations[name_of_n_ed_spread.get()]
there_is_new_data = True
except:
pass
if the_data.results is not None:
update_spreadsheet(o_editor_results, the_data.results, height=140)
update_spreadsheet(o_editor_totals, pandas.DataFrame(the_data.totals, dtype=object), height=10)
if there_is_new_data:
if newdata != 'None' and newdata != '':
if the_data.results is not None:
update_spreadsheet(n_editor_results, newdata.results, height=140)
update_spreadsheet(n_editor_totals, pandas.DataFrame(newdata.totals, dtype=object), height=10)
if name_of_o_ed_spread.get() == name_of_interro_spreadsheet.get():
the_data = all_interrogations[name_of_interro_spreadsheet.get()]
tot = pandas.DataFrame(the_data.totals, dtype=object)
if the_data.results is not None:
update_spreadsheet(interro_results, the_data.results, height=340)
update_spreadsheet(interro_totals, tot, height=10)
timestring('Updated spreadsheet display in edit window.')
from corpkit.process import is_number
################### ################### ################### ###################
#PREFERENCES POPUP# #PREFERENCES POPUP# #PREFERENCES POPUP# #PREFERENCES POPUP#
################### ################### ################### ###################
# make variables with default values
do_auto_update = IntVar()
do_auto_update.set(1)
do_auto_update_this_session = IntVar()
do_auto_update_this_session.set(1)
#conc_when_int = IntVar()
#conc_when_int.set(1)
only_format_match = IntVar()
only_format_match.set(0)
files_as_subcorpora = IntVar()
files_as_subcorpora.set(0)
do_concordancing = IntVar()
do_concordancing.set(1)
show_conc_metadata = IntVar()
show_conc_metadata.set(1)
#noregex = IntVar()
#noregex.set(0)
parser_memory = StringVar()
parser_memory.set(str(2000))
truncate_conc_after = IntVar()
truncate_conc_after.set(9999)
truncate_spreadsheet_after = IntVar()
truncate_spreadsheet_after.set(9999)
corenlppath = StringVar()
corenlppath.set(os.path.join(os.path.expanduser("~"), 'corenlp'))
row_label_width=IntVar()
row_label_width.set(100)
cell_width=IntVar()
cell_width.set(50)
p_val = DoubleVar()
p_val.set(0.05)
# a place for the toplevel entry info
entryboxes = OrderedDict()
# fill it with null data
for i in range(10):
tmp = StringVar()
tmp.set('')
entryboxes[i] = tmp
def preferences_popup():
try:
global toplevel
toplevel.destroy()
except:
pass
from tkinter import Toplevel
pref_pop = Toplevel()
#pref_pop.config(background = '#F4F4F4')
pref_pop.geometry('+300+100')
pref_pop.title("Preferences")
#pref_pop.overrideredirect(1)
pref_pop.wm_attributes('-topmost', 1)
Label(pref_pop, text='').grid(row=0, column=0, pady=2)
def quit_coding(*args):
save_tool_prefs(printout=True)
pref_pop.destroy()
tmp = Checkbutton(pref_pop, text='Automatically check for updates', variable=do_auto_update, onvalue=1, offvalue=0)
if do_auto_update.get() == 1:
tmp.select()
all_text_widgets.append(tmp)
tmp.grid(row=0, column=0, sticky=W)
Label(pref_pop, text='Truncate concordance lines').grid(row=1, column=0, sticky=W)
tmp = Entry(pref_pop, textvariable=truncate_conc_after, width=7)
all_text_widgets.append(tmp)
tmp.grid(row=1, column=1, sticky=E)
Label(pref_pop, text='Truncate spreadsheets').grid(row=2, column=0, sticky=W)
tmp = Entry(pref_pop, textvariable=truncate_spreadsheet_after, width=7)
all_text_widgets.append(tmp)
tmp.grid(row=2, column=1, sticky=E)
Label(pref_pop, text='CoreNLP memory allocation (MB)').grid(row=3, column=0, sticky=W)
tmp = Entry(pref_pop, textvariable=parser_memory, width=7)
all_text_widgets.append(tmp)
tmp.grid(row=3, column=1, sticky=E)
Label(pref_pop, text='Spreadsheet cell width').grid(row=4, column=0, sticky=W)
tmp = Entry(pref_pop, textvariable=cell_width, width=7)
all_text_widgets.append(tmp)
tmp.grid(row=4, column=1, sticky=E)
Label(pref_pop, text='Spreadsheet row header width').grid(row=5, column=0, sticky=W)
tmp = Entry(pref_pop, textvariable=row_label_width, width=7)
all_text_widgets.append(tmp)
tmp.grid(row=5, column=1, sticky=E)
Label(pref_pop, text='P value').grid(row=6, column=0, sticky=W)
tmp = Entry(pref_pop, textvariable=p_val, width=7)
all_text_widgets.append(tmp)
tmp.grid(row=6, column=1, sticky=E)
Label(pref_pop, text='CoreNLP path:', justify=LEFT).grid(row=7, column=0, sticky=W, rowspan = 1)
Button(pref_pop, text='Change', command=set_corenlp_path, width =5).grid(row=7, column=1, sticky=E)
Label(pref_pop, textvariable=corenlppath, justify=LEFT).grid(row=8, column=0, sticky=W)
#set_corenlp_path
tmp = Checkbutton(pref_pop, text='Treat files as subcorpora', variable=files_as_subcorpora, onvalue=1, offvalue=0)
tmp.grid(row=10, column=0, pady=(0,0), sticky=W)
#tmp = Checkbutton(pref_pop, text='Disable regex for plaintext', variable=noregex, onvalue=1, offvalue=0)
#tmp.grid(row=9, column=1, pady=(0,0), sticky=W)
tmp = Checkbutton(pref_pop, text='Do concordancing', variable=do_concordancing, onvalue=1, offvalue=0)
tmp.grid(row=10, column=1, pady=(0,0), sticky=W)
tmp = Checkbutton(pref_pop, text='Format concordance context', variable=only_format_match, onvalue=1, offvalue=0)
tmp.grid(row=11, column=0, pady=(0,0), sticky=W)
tmp = Checkbutton(pref_pop, text='Show concordance metadata', variable=show_conc_metadata, onvalue=1, offvalue=0)
tmp.grid(row=11, column=1, pady=(0,0), sticky=W)
stopbut = Button(pref_pop, text='Done', command=quit_coding)
stopbut.grid(row=12, column=0, columnspan=2, pady=15)
pref_pop.bind("<Return>", quit_coding)
pref_pop.bind("<Tab>", focus_next_window)
################### ################### ################### ###################
# INTERROGATE TAB # # INTERROGATE TAB # # INTERROGATE TAB # # INTERROGATE TAB #
################### ################### ################### ###################
# hopefully weighting the two columns, not sure if works
interro_opt = Frame(tab1)
interro_opt.grid(row=0, column=0)
tab1.grid_columnconfigure(2, weight=5)
def do_interrogation(conc=True):
"""the main function: calls interrogator()"""
import pandas
from corpkit.interrogator import interrogator
from corpkit.interrogation import Interrogation, Interrodict
doing_concondancing = True
# no pressing while running
#if not conc:
interrobut.config(state=DISABLED)
#else:
interrobut_conc.config(state=DISABLED)
recalc_but.config(state=DISABLED)
# progbar to zero
note.progvar.set(0)
for i in list(itemcoldict.keys()):
del itemcoldict[i]
# spelling conversion?
#conv = (spl.var).get()
#if conv == 'Convert spelling' or conv == 'Off':
# conv = False
# lemmatag: do i need to add as button if trees?
lemmatag = False
query = qa.get(1.0, END).replace('\n', '')
if not datatype_picked.get() == 'CQL':
# allow list queries
if query.startswith('[') and query.endswith(']') and ',' in query:
query = query.lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
#elif transdict[searchtype()] in ['e', 's']:
#query = query.lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
else:
# convert special stuff
query = remake_special(query, customs=custom_special_dict,
case_sensitive=case_sensitive.get())
if query is False:
return
# make name for interrogation
the_name = namer(nametext.get(), type_of_data='interrogation')
cqlmode = IntVar()
cqlmode.set(0)
# get the main query
so = datatype_picked.get()
if so == 'CQL':
cqlmode.set(1)
selected_option = convert_name_to_query.get(so, so)
if selected_option == '':
timestring('You need to select a search type.')
return
queryd = {}
for k, v in list(additional_criteria.items()):
# this should already be done
queryd[k] = v
queryd[selected_option] = query
# cql mode just takes a string
if cqlmode.get():
queryd = query
if selected_option == 'v':
queryd = 'features'
doing_concondancing = False
else:
doing_concondancing = True
# to do: make this order customisable for the gui too
poss_returns = [return_function, return_pos, return_lemma, return_token, \
return_gov, return_dep, return_tree, return_index, return_distance, \
return_count, return_gov_lemma, return_gov_pos, return_gov_func, \
return_dep_lemma, return_dep_pos, return_dep_func, \
return_ngm_lemma, return_ngm_pos, return_ngm_func, return_ngm]
must_make = [return_ngm_lemma, return_ngm_pos, return_ngm_func, return_ngm]
to_show = [prenext_pos.get() + i.get() if i in must_make and i.get() else i.get() for i in poss_returns]
to_show = [i for i in to_show if i and 'Position' not in i]
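    # to_show is now a list of short show codes (e.g. 'w', 'l', 'gf'); the
    # n-gram returns get the selected position (e.g. '-1', '+2') prefixed, and
    # anything still reading 'Position' is filtered out.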
if not to_show and not selected_option == 'v':
timestring('Interrogation must return something.')
return
if 'c' in to_show:
doing_concondancing = False
if not do_concordancing.get():
doing_concondancing = False
#if noregex.get() == 1:
# regex = False
#else:
# regex = True
subcc = False
just_subc = False
met_field_ids = [by_met_listbox.get(i) for i in by_met_listbox.curselection()]
met_val_ids = [speaker_listbox.get(i) for i in speaker_listbox.curselection()]
if len(met_field_ids) == 1:
met_field_ids = met_field_ids[0]
if len(met_val_ids) == 1:
met_val_ids = met_val_ids[0]
if met_field_ids and not met_val_ids:
if isinstance(met_field_ids, list):
subcc = met_field_ids
else:
if met_field_ids == 'folders':
subcc = False
elif met_field_ids == 'files':
files_as_subcorpora.set(1)
elif met_field_ids == 'none':
# todo: no sub mode?
subcc = False
else:
subcc = met_field_ids
elif not met_field_ids:
subcc = False
elif met_field_ids and met_val_ids:
subcc = met_field_ids
if 'ALL' in met_val_ids:
pass
else:
just_subc = {met_field_ids: met_val_ids}
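    # subcc ends up as the 'subcorpora' argument below (which metadata field(s)
    # to use as subcorpora, or False for the default), and just_subc as
    # 'just_metadata' (a {field: values} filter, or False for no filtering).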
# default interrogator args: root and note pass the gui itself for updating
# progress bar and so on.
interrogator_args = {'search': queryd,
'show': to_show,
'case_sensitive': bool(case_sensitive.get()),
'no_punct': bool(no_punct.get()),
#'spelling': conv,
'root': root,
'note': note,
'df1_always_df': True,
'conc': doing_concondancing,
'only_format_match': not bool(only_format_match.get()),
#'dep_type': depdict.get(kind_of_dep.get(), 'CC-processed'),
'nltk_data_path': nltk_data_path,
#'regex': regex,
'coref': coref.get(),
'cql': cqlmode.get(),
'files_as_subcorpora': bool(files_as_subcorpora.get()),
'subcorpora': subcc,
'just_metadata': just_subc,
'show_conc_metadata': bool(show_conc_metadata.get()),
'use_interrodict': True}
if debug:
print(interrogator_args)
excludes = {}
for k, v in list(ex_additional_criteria.items()):
if k != 'None':
excludes[k.lower()[0]] = v
if exclude_op.get() != 'None':
q = remake_special(exclude_str.get(), return_list=True,
customs=custom_special_dict,
case_sensitive=case_sensitive.get())
if q:
excludes[exclude_op.get().lower()[0]] = q
if excludes:
interrogator_args['exclude'] = excludes
try:
interrogator_args['searchmode'] = anyall.get()
except:
pass
try:
interrogator_args['excludemode'] = excludemode.get()
except:
pass
# translate lemmatag
tagdict = {'Noun': 'n',
'Adjective': 'a',
'Verb': 'v',
'Adverb': 'r',
'None': False,
'': False,
'Off': False}
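    # tagdict maps the GUI's word-class labels onto single-letter tags
    # ('n', 'a', 'v', 'r'), presumably for the lemmatiser's lemmatag option;
    # the menu that would use it is currently commented out.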
#interrogator_args['lemmatag'] = tagdict[lemtag.get()]
if corpus_fullpath.get() == '':
timestring('You need to select a corpus.')
return
# stats preset is actually a search type
#if special_queries.get() == 'Stats':
# selected_option = 'v'
# interrogator_args['query'] = 'any'
# if ngramming, there are two extra options
ngm = ngmsize.var.get()
if ngm != 'Size':
interrogator_args['gramsize'] = int(ngm)
clc = collosize.var.get()
if clc != 'Size':
interrogator_args['window'] = int(clc)
#if subc_pick.get() == "Subcorpus" or subc_pick.get().lower() == 'all' or \
# selected_corpus_has_no_subcorpora.get() == 1:
corp_to_search = corpus_fullpath.get()
#else:
# corp_to_search = os.path.join(corpus_fullpath.get(), subc_pick.get())
# do interrogation, return if empty
if debug:
print('CORPUS:', corp_to_search)
interrodata = interrogator(corp_to_search, **interrogator_args)
if isinstance(interrodata, Interrogation):
if hasattr(interrodata, 'results') and interrodata.results is not None:
if interrodata.results.empty:
timestring('No results found, sorry.')
return
# make sure we're redirecting stdout again
if not debug:
sys.stdout = note.redir
# update spreadsheets
if not isinstance(interrodata, (Interrogation, Interrodict)):
update_spreadsheet(interro_results, df_to_show=None, height=340)
update_spreadsheet(interro_totals, df_to_show=None, height=10)
return
# make non-dict results into dict, so we can iterate no matter
# if there were multiple results or not
interrogation_returned_dict = False
from collections import OrderedDict
if isinstance(interrodata, Interrogation):
dict_of_results = OrderedDict({the_name: interrodata})
else:
dict_of_results = interrodata
interrogation_returned_dict = True
# remove dummy entry from master
all_interrogations.pop('None', None)
# post-process each result and add to master list
for nm, r in sorted(dict_of_results.items(), key=lambda x: x[0]):
# drop over 9999
# type check probably redundant now
if r.results is not None:
large = [n for i, n in enumerate(list(r.results.columns)) if i > truncate_spreadsheet_after.get()]
r.results.drop(large, axis=1, inplace=True)
r.results.drop('Total', errors='ignore', inplace=True)
r.results.drop('Total', errors='ignore', inplace=True, axis=1)
# add interrogation to master list
if interrogation_returned_dict:
all_interrogations[the_name + '-' + nm] = r
all_conc[the_name + '-' + nm] = r.concordance
dict_of_results[the_name + '-' + nm] = dict_of_results.pop(nm)
# make multi for conc...
else:
all_interrogations[nm] = r
all_conc[nm] = r.concordance
    # show the first (or only) interrogation in dict_of_results in the spreadsheet
recent_interrogation_name = list(dict_of_results.keys())[0]
recent_interrogation_data = list(dict_of_results.values())[0]
if queryd == {'v': 'any'}:
conc = False
if doing_concondancing:
conc_to_show = recent_interrogation_data.concordance
if conc_to_show is not None:
numresults = len(conc_to_show.index)
if numresults > truncate_conc_after.get() - 1:
nums = str(numresults)
if numresults == 9999:
nums += '+'
truncate = messagebox.askyesno("Long results list",
"%s unique concordance results! Truncate to %s?" % (nums, str(truncate_conc_after.get())))
if truncate:
conc_to_show = conc_to_show.head(truncate_conc_after.get())
add_conc_lines_to_window(conc_to_show, preserve_colour=False)
else:
timestring('No concordance results generated.')
global conc_saved
conc_saved = False
name_of_interro_spreadsheet.set(recent_interrogation_name)
i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
# total in a way that tkintertable likes
if isinstance(recent_interrogation_data.totals, int):
recent_interrogation_data.totals = Series(recent_interrogation_data.totals)
totals_as_df = pandas.DataFrame(recent_interrogation_data.totals, dtype=object)
# update spreadsheets
if recent_interrogation_data.results is not None:
update_spreadsheet(interro_results, recent_interrogation_data.results, height=340)
else:
update_spreadsheet(interro_results, df_to_show=None, height=340)
update_spreadsheet(interro_totals, totals_as_df, height=10)
ind = list(all_interrogations.keys()).index(name_of_interro_spreadsheet.get())
if ind == 0:
prev.configure(state=DISABLED)
else:
prev.configure(state=NORMAL)
if ind + 1 == len(list(all_interrogations.keys())):
nex.configure(state=DISABLED)
else:
nex.configure(state=NORMAL)
refresh()
    if recent_interrogation_data.results is not None:
        subs = recent_interrogation_data.results.index
    else:
        subs = recent_interrogation_data.totals.index
subc_listbox.delete(0, 'end')
for e in list(subs):
if e != 'tkintertable-order':
subc_listbox.insert(END, e)
#reset name
nametext.set('untitled')
if interrogation_returned_dict:
timestring('Interrogation finished, with multiple results.')
interrobut.config(state=NORMAL)
interrobut_conc.config(state=NORMAL)
recalc_but.config(state=NORMAL)
class MyOptionMenu(OptionMenu):
"""Simple OptionMenu for things that don't change."""
def __init__(self, tab1, status, *options):
self.var = StringVar(tab1)
self.var.set(status)
OptionMenu.__init__(self, tab1, self.var, *options)
self.config(font=('calibri',(12)),width=20)
self['menu'].config(font=('calibri',(10)))
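# Minimal usage sketch for MyOptionMenu (values here are illustrative only):
#   spl = MyOptionMenu(tab1, 'Off', 'UK', 'US')
#   spl.grid(row=0, column=0)
#   spl.var.get()   # -> 'Off' until the user picks another value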
def corpus_callback(*args):
"""
On selecting a corpus, set everything appropriately.
also, disable some kinds of search based on the name
"""
if not current_corpus.get():
return
import os
from os.path import join, isdir, isfile, exists
corpus_fullpath.set(join(corpora_fullpath.get(), current_corpus.get()))
fp = corpus_fullpath.get()
from corpkit.corpus import Corpus
corpus = Corpus(fp, print_info=False)
dtype = corpus.datatype
cols = []
if dtype == 'conll':
datatype_picked.set('Word')
try:
cols = corpus.metadata['columns']
except KeyError:
pass
try:
subdrs = sorted([d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))])
except FileNotFoundError:
subdrs = []
if len(subdrs) == 0:
charttype.set('bar')
pick_a_datatype['menu'].delete(0, 'end')
path_to_new_unparsed_corpus.set(fp)
#add_corpus_button.set('Added: "%s"' % os.path.basename(fp))
# why is it setting itself?
#current_corpus.set(os.path.basename(fp))
from corpkit.process import make_name_to_query_dict
exist = {'CQL': 'cql'}
if 'f' in cols:
exist['Trees'] = 't'
exist['Stats'] = 'v'
# todo: only cql for tokenised
convert_name_to_query = make_name_to_query_dict(exist, cols, dtype)
# allow tokenising/parsing of plaintext
if not fp.endswith('-parsed') and not fp.endswith('-tokenised'):
parsebut.config(state=NORMAL)
tokbut.config(state=NORMAL)
parse_button_text.set('Parse: %s' % os.path.basename(fp))
tokenise_button_text.set('Tokenise: %s' % current_corpus.get())
# disable tokenising and parsing of non plaintxt
else:
parsebut.config(state=NORMAL)
tokbut.config(state=NORMAL)
parse_button_text.set('Parse corpus')
tokenise_button_text.set('Tokenise corpus')
parsebut.config(state=DISABLED)
tokbut.config(state=DISABLED)
# no corefs
if not fp.endswith('-parsed') and not fp.endswith('tokenised'):
#pick_dep_type.config(state=DISABLED)
coref_but.config(state=DISABLED)
#parsebut.config(state=NORMAL)
#speakcheck_build.config(state=NORMAL)
interrobut_conc.config(state=DISABLED)
recalc_but.config(state=DISABLED)
#sensplitbut.config(state=NORMAL)
pick_a_datatype.configure(state=DISABLED)
interrobut.configure(state=DISABLED)
interrobut_conc.config(state=DISABLED)
recalc_but.config(state=DISABLED)
else:
interrobut_conc.config(state=NORMAL)
recalc_but.config(state=NORMAL)
pick_a_datatype.configure(state=NORMAL)
interrobut.configure(state=NORMAL)
if datatype_picked.get() not in ['Trees']:
coref_but.config(state=NORMAL)
interrobut_conc.config(state=DISABLED)
recalc_but.config(state=DISABLED)
for i in sorted(convert_name_to_query):
# todo: for now --- simplifying gui!
if i.lower() == 'distance from root' or i.lower().startswith('head'):
continue
pick_a_datatype['menu'].add_command(label=i, command=_setit(datatype_picked, i))
#parsebut.config(state=DISABLED)
#speakcheck_build.config(state=DISABLED)
datatype_picked.set('Word')
if not fp.endswith('-tokenised') and not fp.endswith('-parsed'):
pick_a_datatype['menu'].add_command(label='Word', command=_setit(datatype_picked, 'Word'))
else:
datatype_picked.set('Word')
add_subcorpora_to_build_box(fp)
note.progvar.set(0)
if current_corpus.get() in list(corpus_names_and_speakers.keys()):
refresh_by_metadata()
#speakcheck.config(state=NORMAL)
else:
pass
#speakcheck.config(state=DISABLED)
timestring('Set corpus directory: "%s"' % fp)
editf.set('Edit file: ')
parse_only = [ck4, ck5, ck6, ck7, ck9, ck10, ck11, ck12, ck13, ck14, ck15, ck16]
non_parsed = [ck1, ck8]
if 'l' in cols:
non_parsed.append(ck2)
if 'p' in cols:
non_parsed.append(ck3)
if not current_corpus.get().endswith('-parsed'):
for but in parse_only:
desel_and_turn_off(but)
for but in non_parsed:
turnon(but)
else:
for but in parse_only:
turnon(but)
for but in non_parsed:
turnon(but)
if datatype_picked.get() == 'Trees':
ck4.config(state=NORMAL)
else:
ck4.config(state=DISABLED)
refresh_by_metadata()
Label(interro_opt, text='Corpus/subcorpora:').grid(row=0, column=0, sticky=W)
current_corpus = StringVar()
current_corpus.set('Corpus')
available_corpora = OptionMenu(interro_opt, current_corpus, 'Select corpus')
available_corpora.config(width=30, state=DISABLED, justify=CENTER)
current_corpus.trace("w", corpus_callback)
available_corpora.grid(row=0, column=0, columnspan=2, padx=(135,0))
available_corpora_build = OptionMenu(tab0, current_corpus, 'Select corpus')
available_corpora_build.config(width=25, justify=CENTER, state=DISABLED)
available_corpora_build.grid(row=4, column=0, sticky=W)
ex_additional_criteria = {}
ex_anyall = StringVar()
ex_anyall.set('any')
ex_objs = OrderedDict()
# fill it with null data
for i in range(20):
tmp = StringVar()
tmp.set('')
ex_objs[i] = [None, None, None, tmp]
ex_permref = []
exclude_str = StringVar()
exclude_str.set('')
Label(interro_opt, text='Exclude:').grid(row=8, column=0, sticky=W, pady=(0, 10))
exclude_op = StringVar()
exclude_op.set('None')
exclude = OptionMenu(interro_opt, exclude_op, *['None'] + sorted(convert_name_to_query.keys()))
exclude.config(width=14)
exclude.grid(row=8, column=0, sticky=W, padx=(60, 0), pady=(0, 10))
qr = Entry(interro_opt, textvariable=exclude_str, width=18, state=DISABLED)
qr.grid(row=8, column=0, columnspan=2, sticky=E, padx=(0,40), pady=(0, 10))
all_text_widgets.append(qr)
ex_plusbut = Button(interro_opt, text='+', \
command=lambda: add_criteria(ex_objs, ex_permref, ex_anyall, ex_additional_criteria, \
exclude_op, exclude_str, title = 'Exclude from interrogation'), \
state=DISABLED)
ex_plusbut.grid(row=8, column=1, sticky=E, pady=(0, 10))
#blklst = StringVar()
#Label(interro_opt, text='Blacklist:').grid(row=12, column=0, sticky=W)
##blklst.set(r'^n')
#blklst.set(r'')
#bkbx = Entry(interro_opt, textvariable=blklst, width=22)
#bkbx.grid(row=12, column=0, columnspan=2, sticky=E)
#all_text_widgets.append(bkbx)
def populate_metavals(evt):
"""
Add the values for a metadata field to the subcorpus box
"""
from corpkit.process import get_corpus_metadata
try:
wx = evt.widget
except:
wx = evt
speaker_listbox.configure(state=NORMAL)
speaker_listbox.delete(0, END)
indices = wx.curselection()
if wx.get(indices[0]) != 'none':
speaker_listbox.insert(END, 'ALL')
for index in indices:
value = wx.get(index)
if value == 'files':
from corpkit.corpus import Corpus
corp = Corpus(current_corpus.get(), print_info=False)
vals = [i.name for i in corp.all_files]
elif value == 'folders':
from corpkit.corpus import Corpus
corp = Corpus(current_corpus.get(), print_info=False)
vals = [i.name for i in corp.subcorpora]
elif value == 'none':
vals = []
else:
meta = get_corpus_metadata(corpus_fullpath.get(), generate=True)
vals = meta['fields'][value]
#vals = get_speaker_names_from_parsed_corpus(corpus_fullpath.get(), value)
for v in vals:
speaker_listbox.insert(END, v)
# lemma tags
#lemtags = tuple(('Off', 'Noun', 'Verb', 'Adjective', 'Adverb'))
#lemtag = StringVar(root)
#lemtag.set('')
#Label(interro_opt, text='Result word class:').grid(row=13, column=0, columnspan=2, sticky=E, padx=(0, 120))
#lmt = OptionMenu(interro_opt, lemtag, *lemtags)
#lmt.config(state=NORMAL, width=10)
#lmt.grid(row=13, column=1, sticky=E)
#lemtag.trace("w", d_callback)
def refresh_by_metadata(*args):
"""
Add metadata for a corpus from dotfile to listbox
"""
import os
if os.path.isdir(corpus_fullpath.get()):
from corpkit.process import get_corpus_metadata
ns = get_corpus_metadata(corpus_fullpath.get(), generate=True)
ns = list(ns.get('fields', {}))
#ns = corpus_names_and_speakers[os.path.basename(corpus_fullpath.get())]
else:
return
speaker_listbox.delete(0, 'end')
# figure out which list we need to add to, and which we should del from
lbs = []
delfrom = []
# todo: this should be, if new corpus, delfrom...
if True:
lbs.append(by_met_listbox)
else:
delfrom.append(by_met_listbox)
# add names
for lb in lbs:
lb.configure(state=NORMAL)
lb.delete(0, END)
from corpkit.corpus import Corpus
corp = Corpus(current_corpus.get(), print_info=False)
if corp.level == 'c':
lb.insert(END, 'folders')
lb.insert(END, 'files')
for idz in sorted(ns):
lb.insert(END, idz)
lb.insert(END, 'none')
# or delete names
for lb in delfrom:
lb.configure(state=NORMAL)
lb.delete(0, END)
lb.configure(state=DISABLED)
by_met_listbox.selection_set(0)
populate_metavals(by_met_listbox)
# by metadata
by_meta_scrl = Frame(interro_opt)
by_meta_scrl.grid(row=1, column=0, rowspan=2, sticky='w', padx=(5,0), pady=(5, 5))
# scrollbar for the listbox
by_met_bar = Scrollbar(by_meta_scrl)
by_met_bar.pack(side=RIGHT, fill=Y)
# listbox itself
slist_height = 2 if small_screen else 6
by_met_listbox = Listbox(by_meta_scrl, selectmode=EXTENDED, width=12, height=slist_height,
relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=by_met_bar.set, exportselection=False)
by_met_listbox.pack()
by_met_bar.config(command=by_met_listbox.yview)
xx = by_met_listbox.bind('<<ListboxSelect>>', populate_metavals)
# frame to hold metadata values listbox
spk_scrl = Frame(interro_opt)
spk_scrl.grid(row=1, column=0, rowspan=2, columnspan=2, sticky=E, pady=(5,5))
# scrollbar for the listbox
spk_sbar = Scrollbar(spk_scrl)
spk_sbar.pack(side=RIGHT, fill=Y)
# listbox itself
speaker_listbox = Listbox(spk_scrl, selectmode=EXTENDED, width=29, height=slist_height,
relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=spk_sbar.set, exportselection=False)
speaker_listbox.pack()
speaker_listbox.configure(state=DISABLED)
spk_sbar.config(command=speaker_listbox.yview)
# dep type
#dep_types = tuple(('Basic', 'Collapsed', 'CC-processed'))
#kind_of_dep = StringVar(root)
#kind_of_dep.set('CC-processed')
#Label(interro_opt, text='Dependency type:').grid(row=16, column=0, sticky=W)
#pick_dep_type = OptionMenu(interro_opt, kind_of_dep, *dep_types)
#pick_dep_type.config(state=DISABLED)
#pick_dep_type.grid(row=16, column=0, sticky=W, padx=(125,0))
#kind_of_dep.trace("w", d_callback)
coref = IntVar(root)
coref.set(False)
coref_but = Checkbutton(interro_opt, text='Count coreferents', variable=coref, onvalue=True, offvalue=False)
coref_but.grid(row=6, column=1, sticky=E, pady=(5,0))
coref_but.config(state=DISABLED)
# query
entrytext=StringVar()
Label(interro_opt, text='Query:').grid(row=4, column=0, sticky='NW', pady=(5,0))
entrytext.set(r'\b(m.n|wom.n|child(ren)?)\b')
qa_height = 2 if small_screen else 6
qa = Text(interro_opt, width=40, height=qa_height, borderwidth=0.5,
font=("Courier New", 14), undo=True, relief=SUNKEN, wrap=WORD, highlightthickness=0)
qa.insert(END, entrytext.get())
qa.grid(row=4, column=0, columnspan=2, sticky=E, pady=(5,5), padx=(0, 4))
all_text_widgets.append(qa)
additional_criteria = {}
anyall = StringVar()
anyall.set('all')
objs = OrderedDict()
# fill it with null data
for i in range(20):
tmp = StringVar()
tmp.set('')
objs[i] = [None, None, None, tmp]
permref = []
def add_criteria(objs, permref, anyalltoggle, output_dict,
optvar, enttext, title = "Additional criteria"):
"""this is a popup for adding additional search criteria.
it's also used for excludes"""
if title == 'Additional criteria':
enttext.set(qa.get(1.0, END).strip('\n').strip())
from tkinter import Toplevel
try:
more_criteria = permref[0]
more_criteria.deiconify()
return
except:
pass
more_criteria = Toplevel()
more_criteria.geometry('+500+100')
more_criteria.title(title)
more_criteria.wm_attributes('-topmost', 1)
total = 0
n_items = []
def quit_q(total, *args):
"""exit popup, saving entries"""
poss_keys = []
for index, (option, optvar, entbox, entstring) in enumerate(list(objs.values())[:total]):
if index == 0:
enttext.set(entstring.get())
optvar.set(optvar.get())
datatype_picked.set(optvar.get())
if optvar is not None:
o = convert_name_to_query.get(optvar.get(), optvar.get())
q = entstring.get().strip()
q = remake_special(q, customs=custom_special_dict,
case_sensitive=case_sensitive.get(), return_list=True)
output_dict[o] = q
# may not work on mac ...
if title == 'Additional criteria':
if len(list(objs.values())[:total]) > 0:
plusbut.config(bg='#F4F4F4')
else:
plusbut.config(bg='white')
else:
if len(list(objs.values())[:total]) > 0:
ex_plusbut.config(bg='#F4F4F4')
else:
ex_plusbut.config(bg='white')
more_criteria.withdraw()
def remove_prev():
"""delete last added criteria line"""
if len([k for k, v in objs.items() if v[0] is not None]) < 2:
pass
else:
ans = 0
for k, (a, b, c, d) in reversed(list(objs.items())):
if a is not None:
ans = k
break
if objs[ans][0] is not None:
objs[ans][0].destroy()
optvar = objs[ans][1].get()
try:
del output_dict[convert_name_to_query[optvar]]
except:
pass
objs[ans][1] = StringVar()
if objs[ans][2] is not None:
objs[ans][2].destroy()
objs[ans][3] = StringVar()
objs.pop(ans, None)
def clear_q():
"""clear the popup"""
for optmenu, optvar, entbox, entstring in list(objs.values()):
if optmenu is not None:
optvar.set('Word')
entstring.set('')
def new_item(total, optvar, enttext, init = False):
"""add line to popup"""
for i in n_items:
i.destroy()
for i in n_items:
n_items.remove(i)
chosen = StringVar()
poss = ['None'] + sorted(convert_name_to_query.keys())
poss = [k for k in poss if not 'distance' in k.lower() and not 'head ' in k.lower()]
chosen.set('Word')
opt = OptionMenu(more_criteria, chosen, *poss)
opt.config(width=16)
t = total + 1
opt.grid(row=total, column=0, sticky=W)
text_str = StringVar()
text_str.set('')
text=Entry(more_criteria, textvariable=text_str, width=40, font=("Courier New", 13))
all_text_widgets.append(text)
text.grid(row=total, column=1)
objs[total] = [opt, chosen, text, text_str]
minuser = Button(more_criteria, text='-', command=remove_prev)
minuser.grid(row=total + 2, column=0, sticky=W, padx=(38,0))
plusser = Button(more_criteria, text='+', command=lambda : new_item(t, optvar, enttext))
plusser.grid(row=total + 2, column=0, sticky=W)
stopbut = Button(more_criteria, text='Done', command=lambda : quit_q(t))
stopbut.grid(row=total + 2, column=1, sticky=E)
clearbut = Button(more_criteria, text='Clear', command=clear_q)
clearbut.grid(row=total + 2, column=1, sticky=E, padx=(0, 60))
r1 = Radiobutton(more_criteria, text='Match any', variable=anyalltoggle, value= 'any')
r1.grid(row=total + 2, column=0, columnspan=2, sticky=E, padx=(0,150))
r2 = Radiobutton(more_criteria, text='Match all', variable=anyalltoggle, value= 'all')
r2.grid(row=total + 2, column=0, columnspan=2, sticky=E, padx=(0,250))
n_items.append(plusser)
n_items.append(stopbut)
n_items.append(minuser)
n_items.append(clearbut)
n_items.append(r1)
n_items.append(r2)
if init:
text_str.set(enttext.get())
chosen.set(optvar.get())
minuser.config(state=DISABLED)
else:
minuser.config(state=NORMAL)
return t
if objs:
for optmenu, optvar, entbox, entstring in list(objs.values()):
optmenu.grid()
entbox.grid()
# make the first button with defaults
total = new_item(total, optvar, enttext, init = True)
if more_criteria not in permref:
permref.append(more_criteria)
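# The '+' buttons (plusbut/ex_plusbut) open the add_criteria popup; on 'Done',
# the chosen search types and query strings are written into additional_criteria
# (or ex_additional_criteria for excludes), and do_interrogation later folds
# those entries into the search/exclude arguments passed to interrogator().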
plusbut = Button(interro_opt, text='+', \
command=lambda: add_criteria(objs, permref, anyall, \
additional_criteria, datatype_picked, entrytext), \
state=NORMAL)
plusbut.grid(row=4, column=0, columnspan=1, padx=(25,0), pady=(10,0), sticky='w')
def entry_callback(*args):
"""when entry is changed, add it to the textbox"""
qa.config(state=NORMAL)
qa.delete(1.0, END)
qa.insert(END, entrytext.get())
entrytext.trace("w", entry_callback)
def onselect(evt):
"""when an option is selected, add the example query
for ngrams, add the special ngram options"""
w = evt.widget
index = int(w.curselection()[0])
value = w.get(index)
w.see(index)
#datatype_chosen_option.set(value)
#datatype_listbox.select_set(index)
#datatype_listbox.see(index)
if qa.get(1.0, END).strip('\n').strip() in list(def_queries.values()):
if qa.get(1.0, END).strip('\n').strip() not in list(qd.values()):
entrytext.set(def_queries[datatype_picked.get()])
#try:
# ngmsize.destroy()
#except:
# pass
#try:
# split_contract.destroy()
#except:
# pass
# boolean interrogation arguments need fixing, right now use 0 and 1
#lem = IntVar()
#lbut = Checkbutton(interro_opt, text="Lemmatise", variable=lem, onvalue=True, offvalue=False)
#lbut.grid(column=0, row=8, sticky=W)
#phras = IntVar()
#mwbut = Checkbutton(interro_opt, text="Multiword results", variable=phras, onvalue=True, offvalue=False)
#mwbut.grid(column=1, row=8, sticky=E)
#tit_fil = IntVar()
#tfbut = Checkbutton(interro_opt, text="Filter titles", variable=tit_fil, onvalue=True, offvalue=False)
#tfbut.grid(row=9, column=0, sticky=W)
case_sensitive = IntVar()
tmp = Checkbutton(interro_opt, text="Case sensitive", variable=case_sensitive, onvalue=True, offvalue=False)
tmp.grid(row=6, column=0, sticky=W, padx=(140,0), pady=(5,0))
no_punct = IntVar()
tmp = Checkbutton(interro_opt, text="Punctuation", variable=no_punct, onvalue=False, offvalue=True)
tmp.deselect()
tmp.grid(row=6, column=0, sticky=W, pady=(5,0))
global ngmsize
Label(interro_opt, text='N-gram size:').grid(row=5, column=0, sticky=W, padx=(220,0), columnspan=2, pady=(5,0))
ngmsize = MyOptionMenu(interro_opt, 'Size','1', '2','3','4','5','6','7','8')
ngmsize.configure(width=12)
ngmsize.grid(row=5, column=1, sticky=E, pady=(5,0))
#ngmsize.config(state=DISABLED)
global collosize
Label(interro_opt, text='Collocation window:').grid(row=5, column=0, sticky=W, pady=(5,0))
collosize = MyOptionMenu(interro_opt, 'Size','1', '2','3','4','5','6','7','8')
collosize.configure(width=8)
collosize.grid(row=5, column=0, sticky=W, padx=(140,0), pady=(5,0))
#collosize.config(state=DISABLED)
#global split_contract
#split_contract = IntVar(root)
#split_contract.set(False)
#split_contract_but = Checkbutton(interro_opt, text='Split contractions', variable=split_contract, onvalue=True, offvalue=False)
#split_contract_but.grid(row=7, column=1, sticky=E)
#Label(interro_opt, text='Spelling:').grid(row=6, column=1, sticky=E, padx=(0, 75))
#spl = MyOptionMenu(interro_opt, 'Off','UK','US')
#spl.configure(width=7)
#spl.grid(row=6, column=1, sticky=E, padx=(2, 0))
def desel_and_turn_off(but):
    """deselect a checkbutton and grey it out"""
    but.config(state=NORMAL)
    but.deselect()
    but.config(state=DISABLED)
def turnon(but):
but.config(state=NORMAL)
def callback(*args):
"""if the drop down list for data type changes, fill options"""
#datatype_listbox.delete(0, 'end')
chosen = datatype_picked.get()
#lst = option_dict[chosen]
#for e in lst:
# datatype_listbox.insert(END, e)
notree = [i for i in sorted(convert_name_to_query.keys()) if i != 'Trees']
if chosen == 'Trees':
for but in [ck5, ck6, ck7, ck9, ck10, ck11, ck12, ck13, ck14, ck15, ck16, \
ck17, ck18, ck19, ck20]:
desel_and_turn_off(but)
        for but in [ck1, ck2, ck3, ck4, ck8]:
turnon(but)
ck1.select()
#q.config(state=DISABLED)
#qr.config(state=DISABLED)
#exclude.config(state=DISABLED)
#sec_match.config(state=DISABLED)
plusbut.config(state=DISABLED)
ex_plusbut.config(state=DISABLED)
elif chosen in notree:
if current_corpus.get().endswith('-parsed'):
for but in [ck1, ck2, ck3, ck5, ck6, ck7, ck8, ck9, ck10, \
ck11, ck12, ck13, ck14, ck15, ck16, \
ck17, ck18, ck19, ck20, \
plusbut, ex_plusbut, exclude, qr]:
turnon(but)
desel_and_turn_off(ck4)
if chosen == 'Stats':
nametext.set('features')
nametexter.config(state=DISABLED)
else:
nametexter.config(state=NORMAL)
nametext.set('untitled')
if chosen == 'Stats':
for but in [ck2, ck3, ck4, ck5, ck6, ck7, ck8, ck9, ck10, \
ck11, ck12, ck13, ck14, ck15, ck16]:
desel_and_turn_off(but)
turnon(ck1)
ck1.select()
ngmshows = [return_ngm, return_ngm_lemma, return_ngm_func, return_ngm_pos]
#ngmsize.config(state=NORMAL)
#collosize.config(state=NORMAL)
#if qa.get(1.0, END).strip('\n').strip() in def_queries.values() + special_examples.values():
clean_query = qa.get(1.0, END).strip('\n').strip()
acc_for_tups = [i[0] if isinstance(i, tuple) else i for i in list(def_queries.values())]
if (clean_query not in list(qd.values()) and clean_query in acc_for_tups) \
or not clean_query:
try:
# for the life of me i don't know why some are appearing as tuples
found = def_queries.get(chosen, clean_query)
if isinstance(found, tuple):
found = found[0]
entrytext.set(found)
except:
pass
datatype_picked = StringVar(root)
Label(interro_opt, text='Search: ').grid(row=3, column=0, sticky=W, pady=10)
pick_a_datatype = OptionMenu(interro_opt, datatype_picked, *sorted(convert_name_to_query.keys()))
pick_a_datatype.configure(width=30, justify=CENTER)
datatype_picked.set('Word')
pick_a_datatype.grid(row=3, column=0, columnspan=2, sticky=W, padx=(136,0))
datatype_picked.trace("w", callback)
# trees, words, functions, governors, dependents, pos, lemma, count
interro_return_frm = Frame(interro_opt)
Label(interro_return_frm, text=' Return', font=("Courier New", 13, "bold")).grid(row=0, column=0, sticky=E)
interro_return_frm.grid(row=7, column=0, columnspan=2, sticky=W, pady=10, padx=(10,0))
Label(interro_return_frm, text=' Token', font=("Courier New", 13)).grid(row=0, column=1, sticky=E)
Label(interro_return_frm, text=' Lemma', font=("Courier New", 13)).grid(row=0, column=2, sticky=E)
Label(interro_return_frm, text=' POS tag', font=("Courier New", 13)).grid(row=0, column=3, sticky=E)
Label(interro_return_frm, text= 'Function', font=("Courier New", 13)).grid(row=0, column=4, sticky=E)
Label(interro_return_frm, text=' Match', font=("Courier New", 13)).grid(row=1, column=0, sticky=E)
Label(interro_return_frm, text=' Governor', font=("Courier New", 13)).grid(row=2, column=0, sticky=E)
Label(interro_return_frm, text='Dependent', font=("Courier New", 13)).grid(row=3, column=0, sticky=E)
prenext_pos = StringVar(root)
prenext_pos.set('Position')
pick_posi_o = ('-5', '-4', '-3', '-2', '-1', '+1', '+2', '+3', '+4', '+5')
pick_posi_m = OptionMenu(interro_return_frm, prenext_pos, *pick_posi_o)
pick_posi_m.config(width=8)
pick_posi_m.grid(row=4, column=0, sticky=E)
#Label(interro_return_frm, text= 'N-gram', font=("Courier New", 13)).grid(row=4, column=0, sticky=E)
Label(interro_return_frm, text=' Other', font=("Courier New", 13)).grid(row=5, column=0, sticky=E)
Label(interro_return_frm, text=' Count', font=("Courier New", 13)).grid(row=5, column=1, sticky=E)
Label(interro_return_frm, text=' Index', font=("Courier New", 13)).grid(row=5, column=2, sticky=E)
Label(interro_return_frm, text=' Distance', font=("Courier New", 13)).grid(row=5, column=3, sticky=E)
Label(interro_return_frm, text=' Tree', font=("Courier New", 13)).grid(row=5, column=4, sticky=E)
return_token = StringVar()
return_token.set('')
ck1 = Checkbutton(interro_return_frm, variable=return_token, onvalue='w', offvalue = '')
ck1.select()
ck1.grid(row=1, column=1, sticky=E)
def return_token_callback(*args):
if datatype_picked.get() == 'Trees':
if return_token.get():
for but in [ck3, ck4, ck8]:
but.config(state=NORMAL)
but.deselect()
return_token.trace("w", return_token_callback)
return_lemma = StringVar()
return_lemma.set('')
ck2 = Checkbutton(interro_return_frm, anchor=E, variable=return_lemma, onvalue='l', offvalue = '')
ck2.grid(row=1, column=2, sticky=E)
def return_lemma_callback(*args):
if datatype_picked.get() == 'Trees':
if return_lemma.get():
for but in [ck3, ck4, ck8]:
but.config(state=NORMAL)
but.deselect()
            # lmt (the result word class menu) is commented out above, so don't touch it here
            #lmt.configure(state=NORMAL)
        #else:
        #    lmt.configure(state=DISABLED)
return_lemma.trace("w", return_lemma_callback)
return_pos = StringVar()
return_pos.set('')
ck3 = Checkbutton(interro_return_frm, variable=return_pos, onvalue='p', offvalue = '')
ck3.grid(row=1, column=3, sticky=E)
def return_pos_callback(*args):
if datatype_picked.get() == 'Trees':
if return_pos.get():
for but in [ck1, ck2, ck4, ck8]:
but.config(state=NORMAL)
but.deselect()
return_pos.trace("w", return_pos_callback)
return_function = StringVar()
return_function.set('')
ck7 = Checkbutton(interro_return_frm, variable=return_function, onvalue='f', offvalue = '')
ck7.grid(row=1, column=4, sticky=E)
return_tree = StringVar()
return_tree.set('')
ck4 = Checkbutton(interro_return_frm, anchor=E, variable=return_tree, onvalue='t', offvalue = '')
ck4.grid(row=6, column=4, sticky=E)
def return_tree_callback(*args):
if datatype_picked.get() == 'Trees':
if return_tree.get():
for but in [ck1, ck2, ck3, ck8]:
but.config(state=NORMAL)
but.deselect()
return_tree.trace("w", return_tree_callback)
return_index = StringVar()
return_index.set('')
ck5 = Checkbutton(interro_return_frm, anchor=E, variable=return_index, onvalue='i', offvalue = '')
ck5.grid(row=6, column=2, sticky=E)
return_distance = StringVar()
return_distance.set('')
ck6 = Checkbutton(interro_return_frm, anchor=E, variable=return_distance, onvalue='a', offvalue = '')
ck6.grid(row=6, column=3, sticky=E)
return_count = StringVar()
return_count.set('')
ck8 = Checkbutton(interro_return_frm, variable=return_count, onvalue='c', offvalue = '')
ck8.grid(row=6, column=1, sticky=E)
def countmode(*args):
ngmshows = [return_ngm, return_ngm_lemma, return_ngm_func, return_ngm_pos]
ngmbuts = [ck17, ck18, ck19, ck20]
if any(ngmshow.get() for ngmshow in ngmshows):
return
if datatype_picked.get() != 'Trees':
buttons = [ck1, ck2, ck3, ck4, ck5, ck6, ck7, ck9,
ck10, ck11, ck12, ck13, ck14, ck15, ck16,
ck17, ck18, ck19, ck20]
if return_count.get() == 'c':
for b in buttons:
desel_and_turn_off(b)
ck8.config(state=NORMAL)
else:
for b in buttons:
b.config(state=NORMAL)
callback()
else:
if return_count.get():
for but in [ck1, ck2, ck3, ck4]:
but.config(state=NORMAL)
but.deselect()
return_count.trace("w", countmode)
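# countmode(): ticking 'Count' (the 'c' show code) switches the interrogation
# to totals-only mode, deselecting and disabling the other return checkboxes
# until it is unticked; it does nothing if an n-gram return is already chosen.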
return_gov = StringVar()
return_gov.set('')
ck9 = Checkbutton(interro_return_frm, variable=return_gov,
onvalue='gw', offvalue = '')
ck9.grid(row=2, column=1, sticky=E)
return_gov_lemma = StringVar()
return_gov_lemma.set('')
ck10 = Checkbutton(interro_return_frm, variable=return_gov_lemma,
onvalue='gl', offvalue = '')
ck10.grid(row=2, column=2, sticky=E)
return_gov_pos = StringVar()
return_gov_pos.set('')
ck11 = Checkbutton(interro_return_frm, variable=return_gov_pos,
onvalue='gp', offvalue = '')
ck11.grid(row=2, column=3, sticky=E)
return_gov_func = StringVar()
return_gov_func.set('')
ck12 = Checkbutton(interro_return_frm, variable=return_gov_func,
onvalue='gf', offvalue = '')
ck12.grid(row=2, column=4, sticky=E)
return_dep = StringVar()
return_dep.set('')
ck13 = Checkbutton(interro_return_frm, variable=return_dep,
onvalue='dw', offvalue = '')
ck13.grid(row=3, column=1, sticky=E)
return_dep_lemma = StringVar()
return_dep_lemma.set('')
ck14 = Checkbutton(interro_return_frm, variable=return_dep_lemma,
onvalue='dl', offvalue = '')
ck14.grid(row=3, column=2, sticky=E)
return_dep_pos = StringVar()
return_dep_pos.set('')
ck15 = Checkbutton(interro_return_frm, variable=return_dep_pos,
onvalue='dp', offvalue = '')
ck15.grid(row=3, column=3, sticky=E)
return_dep_func = StringVar()
return_dep_func.set('')
ck16 = Checkbutton(interro_return_frm, variable=return_dep_func,
onvalue='df', offvalue = '')
ck16.grid(row=3, column=4, sticky=E)
return_ngm = StringVar()
return_ngm.set('')
ck17 = Checkbutton(interro_return_frm, variable=return_ngm,
onvalue='w', offvalue = '')
ck17.grid(row=4, column=1, sticky=E)
return_ngm_lemma = StringVar()
return_ngm_lemma.set('')
ck18 = Checkbutton(interro_return_frm, variable=return_ngm_lemma,
onvalue='l', offvalue = '')
ck18.grid(row=4, column=2, sticky=E)
return_ngm_pos = StringVar()
return_ngm_pos.set('')
ck19 = Checkbutton(interro_return_frm, variable=return_ngm_pos,
onvalue='p', offvalue = '')
ck19.grid(row=4, column=3, sticky=E)
return_ngm_func = StringVar()
return_ngm_func.set('')
ck20 = Checkbutton(interro_return_frm, variable=return_ngm_func,
onvalue='f', offvalue = '', state=DISABLED)
ck20.grid(row=4, column=4, sticky=E)
def q_callback(*args):
qa.configure(state=NORMAL)
qr.configure(state=NORMAL)
#queries = tuple(('Off', 'Any', 'Participants', 'Processes', 'Subjects', 'Stats'))
#special_queries = StringVar(root)
#special_queries.set('Off')
#Label(interro_opt, text='Preset:').grid(row=7, column=0, sticky=W)
#pick_a_query = OptionMenu(interro_opt, special_queries, *queries)
#pick_a_query.config(width=11, state=DISABLED)
#pick_a_query.grid(row=7, column=0, padx=(60, 0), columnspan=2, sticky=W)
#special_queries.trace("w", q_callback)
# Interrogation name
nametext=StringVar()
nametext.set('untitled')
Label(interro_opt, text='Interrogation name:').grid(row=17, column=0, sticky=W)
nametexter = Entry(interro_opt, textvariable=nametext, width=15)
nametexter.grid(row=17, column=1, sticky=E)
all_text_widgets.append(nametexter)
def show_help(kind):
kindict = {'h': 'http://interrogator.github.io/corpkit/doc_help.html',
'q': 'http://interrogator.github.io/corpkit/doc_interrogate.html#trees',
't': 'http://interrogator.github.io/corpkit/doc_troubleshooting.html'}
import webbrowser
webbrowser.open_new(kindict[kind])
# query help, interrogate button
#Button(interro_opt, text='Query help', command=query_help).grid(row=14, column=0, sticky=W)
interrobut = Button(interro_opt, text='Interrogate')
interrobut.config(command=lambda: runner(interrobut, do_interrogation, conc=True), state=DISABLED)
interrobut.grid(row=18, column=1, sticky=E)
# name to show above spreadsheet 0
i_resultname = StringVar()
def change_interro_spread(*args):
if name_of_interro_spreadsheet.get():
#savdict.config(state=NORMAL)
updbut.config(state=NORMAL)
else:
#savdict.config(state=DISABLED)
updbut.config(state=DISABLED)
name_of_interro_spreadsheet = StringVar()
name_of_interro_spreadsheet.set('')
name_of_interro_spreadsheet.trace("w", change_interro_spread)
i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
# make spreadsheet frames for interrogate pane
wdth = int(note_width * 0.50)
interro_right = Frame(tab1, width=wdth)
interro_right.grid(row=0, column=1, sticky=N)
interro_results = Frame(interro_right, height=40, width=wdth, borderwidth=2)
interro_results.grid(column=0, row=0, padx=20, pady=(20,0), sticky='N', columnspan=4)
interro_totals = Frame(interro_right, height=1, width=20, borderwidth=2)
interro_totals.grid(column=0, row=1, padx=20, columnspan=4)
llab = Label(interro_right, textvariable=i_resultname,
font=("Helvetica", 13, "bold"))
llab.grid(row=0, column=0, sticky='NW', padx=20, pady=0)
llab.lift()
# show nothing yet
update_spreadsheet(interro_results, df_to_show=None, height=450, width=wdth)
update_spreadsheet(interro_totals, df_to_show=None, height=10, width=wdth)
#global prev
four_interro_under = Frame(interro_right, width=wdth)
four_interro_under.grid(row=3, column=0, sticky='ew', padx=(20,0))
prev = Button(four_interro_under, text='Previous', command=show_prev)
prev.pack(side='left', expand=True)
#global nex
nex = Button(four_interro_under, text='Next', command=show_next)
nex.pack(side='left', expand=True, padx=(0,50))
if len(list(all_interrogations.keys())) < 2:
nex.configure(state=DISABLED)
prev.configure(state=DISABLED)
#savdict = Button(four_interro_under, text='Save as dictionary', command=save_as_dictionary)
#savdict.config(state=DISABLED)
#savdict.pack(side='right', expand=True)
updbut = Button(four_interro_under, text='Update interrogation', command=lambda: update_all_interrogations(pane='interrogate'))
updbut.pack(side='right', expand=True)
updbut.config(state=DISABLED)
############## ############## ############## ############## ##############
# EDITOR TAB # # EDITOR TAB # # EDITOR TAB # # EDITOR TAB # # EDITOR TAB #
############## ############## ############## ############## ##############
editor_buttons = Frame(tab2)
editor_buttons.grid(row=0, column=0, sticky='NW')
def do_editing():
"""
What happens when you press edit
"""
edbut.config(state=DISABLED)
import os
import pandas as pd
from corpkit.editor import editor
# translate operation into interrogator input
operation_text=opp.get()
if operation_text == 'None' or operation_text == 'Select an operation':
operation_text=None
else:
operation_text=opp.get()[0]
if opp.get() == u"\u00F7":
operation_text='/'
if opp.get() == u"\u00D7":
operation_text='*'
if opp.get() == '%-diff':
operation_text='d'
if opp.get() == 'rel. dist.':
operation_text='a'
# translate dataframe2
data2 = data2_pick.get()
if data2 == 'None' or data2 == '':
data2 = False
elif data2 == 'Self':
data2 = 'self'
elif data2 in ['features', 'postags', 'wordclasses']:
from corpkit.corpus import Corpus
corp = Corpus(current_corpus.get(), print_info=False)
data2 = getattr(corp, data2_pick.get())
#todo: populate results/totals with possibilities for features etc
elif data2 is not False:
if df2branch.get() == 'results':
try:
data2 = getattr(all_interrogations[data2], df2branch.get())
except AttributeError:
timestring('Denominator has no results attribute.')
return
elif df2branch.get() == 'totals':
try:
data2 = getattr(all_interrogations[data2], df2branch.get())
except AttributeError:
timestring('Denominator has no totals attribute.')
return
if transpose.get():
try:
data2 = data2.T
except:
pass
the_data = all_interrogations[name_of_o_ed_spread.get()]
if df1branch.get() == 'results':
if not hasattr(the_data, 'results'):
timestring('Interrogation has no results attribute.')
return
elif df1branch.get() == 'totals':
data1 = the_data.totals
if (spl_editor.var).get() == 'Off' or (spl_editor.var).get() == 'Convert spelling':
spel = False
else:
spel = (spl_editor.var).get()
# editor kwargs
editor_args = {'operation': operation_text,
'dataframe2': data2,
'spelling': spel,
'sort_by': sort_trans[sort_val.get()],
'df1_always_df': True,
'root': root,
'note': note,
'packdir': rd,
'p': p_val.get()}
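    # These keyword arguments are handed to the .edit() call further down;
    # 'p' comes from the preferences popup, and 'root'/'note' let the editor
    # update the GUI and progress bar while it runs.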
if do_sub.get() == 'Merge':
editor_args['merge_subcorpora'] = subc_sel_vals
elif do_sub.get() == 'Keep':
editor_args['just_subcorpora'] = subc_sel_vals
elif do_sub.get() == 'Span':
editor_args['span_subcorpora'] = subc_sel_vals
elif do_sub.get() == 'Skip':
editor_args['skip_subcorpora'] = subc_sel_vals
if toreplace_string.get() != '':
if replacewith_string.get() == '':
replacetup = toreplace_string.get()
else:
replacetup = (toreplace_string.get(), replacewith_string.get())
editor_args['replace_names'] = replacetup
# special query: add to this list!
#if special_queries.get() != 'Off':
#query = spec_quer_translate[special_queries.get()]
entry_do_with = entry_regex.get()
# allow list queries
if entry_do_with.startswith('[') and entry_do_with.endswith(']') and ',' in entry_do_with:
entry_do_with = entry_do_with.lower().lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
else:
# convert special stuff; first check that the query is a valid regular expression
try:
re.compile(entry_do_with)
except re.error:
timestring('Invalid regular expression: %s' % entry_do_with)
return
entry_do_with = remake_special(entry_do_with, customs=custom_special_dict,
case_sensitive=case_sensitive.get(),
return_list=True)
if entry_do_with is False:
return
if do_with_entries.get() == 'Merge':
editor_args['merge_entries'] = entry_do_with
nn = newname_var.get()
if nn == '':
editor_args['newname'] = False
elif is_number(nn):
editor_args['newname'] = int(nn)
else:
editor_args['newname'] = nn
elif do_with_entries.get() == 'Keep':
editor_args['just_entries'] = entry_do_with
elif do_with_entries.get() == 'Skip':
editor_args['skip_entries'] = entry_do_with
if new_subc_name.get() != '':
editor_args['new_subcorpus_name'] = new_subc_name.get()
if newname_var.get() != '':
editor_args['new_subcorpus_name'] = newname_var.get()
if keep_stats_setting.get() == 1:
editor_args['keep_stats'] = True
if rem_abv_p_set.get() == 1:
editor_args['remove_above_p'] = True
if just_tot_setting.get() == 1:
editor_args['just_totals'] = True
if keeptopnum.get() != 'all':
try:
numtokeep = int(keeptopnum.get())
except ValueError:
timestring('Keep top n results value must be a number.')
return
editor_args['keep_top'] = numtokeep
# do editing
r = the_data.edit(branch=df1branch.get(), **editor_args)
if transpose.get():
try:
r.results = r.results.T
except:
pass
try:
r.totals = r.totals.T
except:
pass
if isinstance(r, str):
if r == 'linregress':
return
if not r:
timestring('Editing caused an error.')
return
if len(list(r.results.columns)) == 0:
timestring('Editing removed all results.')
return
# drop columns beyond the first 10,000 so very large results don't choke the spreadsheet
# results should now always be DataFrames, so this isinstance check is just a guard
if isinstance(r.results, pd.DataFrame):
large = [n for i, n in enumerate(list(r.results.columns)) if i > 9999]
r.results.drop(large, axis=1, inplace=True)
timestring('Result editing completed successfully.')
# name the edit
the_name = namer(edit_nametext.get(), type_of_data = 'edited')
# add edit to master dict
all_interrogations[the_name] = r
# update edited results spreadsheet name
name_of_n_ed_spread.set(list(all_interrogations.keys())[-1])
editoname.set('Edited results: %s' % str(name_of_n_ed_spread.get()))
# add current subcorpora to editor menu
for subcl in [subc_listbox]:
#subcl.configure(state=NORMAL)
subcl.delete(0, 'end')
for e in list(r.results.index):
if e != 'tkintertable-order':
subcl.insert(END, e)
#subcl.configure(state=DISABLED)
# update edited spreadsheets
most_recent = all_interrogations[list(all_interrogations.keys())[-1]]
if most_recent.results is not None:
update_spreadsheet(n_editor_results, most_recent.results, height=140)
update_spreadsheet(n_editor_totals, pd.DataFrame(most_recent.totals, dtype=object), height=10)
# finish up
refresh()
# reset some buttons that the user probably wants reset
opp.set('None')
data2_pick.set('Self')
# restore button
def df2_callback(*args):
try:
thisdata = all_interrogations[data2_pick.get()]
except KeyError:
return
if thisdata.results is not None:
df2box.config(state=NORMAL)
else:
df2box.config(state=NORMAL)
df2branch.set('totals')
df2box.config(state=DISABLED)
def df_callback(*args):
"""show names and spreadsheets for what is selected as result to edit
also, hide the edited results section"""
if selected_to_edit.get() != 'None':
edbut.config(state=NORMAL)
name_of_o_ed_spread.set(selected_to_edit.get())
thisdata = all_interrogations[selected_to_edit.get()]
resultname.set('Results to edit: %s' % str(name_of_o_ed_spread.get()))
if thisdata.results is not None:
update_spreadsheet(o_editor_results, thisdata.results, height=140)
df1box.config(state=NORMAL)
else:
df1box.config(state=NORMAL)
df1branch.set('totals')
df1box.config(state=DISABLED)
update_spreadsheet(o_editor_results, df_to_show=None, height=140)
if thisdata.totals is not None:
update_spreadsheet(o_editor_totals, thisdata.totals, height=10)
#df1box.config(state=NORMAL)
#else:
#update_spreadsheet(o_editor_totals, df_to_show=None, height=10)
#df1box.config(state=NORMAL)
#df1branch.set('results')
#df1box.config(state=DISABLED)
else:
edbut.config(state=DISABLED)
name_of_n_ed_spread.set('')
editoname.set('Edited results: %s' % str(name_of_n_ed_spread.get()))
update_spreadsheet(n_editor_results, df_to_show=None, height=140)
update_spreadsheet(n_editor_totals, df_to_show=None, height=10)
for subcl in [subc_listbox]:
subcl.configure(state=NORMAL)
subcl.delete(0, 'end')
if name_of_o_ed_spread.get() != '':
if thisdata.results is not None:
cols = list(thisdata.results.index)
else:
cols = list(thisdata.totals.index)
for e in cols:
if e != 'tkintertable-order':
subcl.insert(END, e)
do_sub.set('Off')
do_with_entries.set('Off')
# result to edit
tup = tuple([i for i in list(all_interrogations.keys())])
selected_to_edit = StringVar(root)
selected_to_edit.set('None')
x = Label(editor_buttons, text='To edit', font=("Helvetica", 13, "bold"))
x.grid(row=0, column=0, sticky=W)
dataframe1s = OptionMenu(editor_buttons, selected_to_edit, *tup)
dataframe1s.config(width=25)
dataframe1s.grid(row=1, column=0, columnspan=2, sticky=W)
selected_to_edit.trace("w", df_callback)
# DF1 branch selection
df1branch = StringVar()
df1branch.set('results')
df1box = OptionMenu(editor_buttons, df1branch, 'results', 'totals')
df1box.config(width=11, state=DISABLED)
df1box.grid(row=1, column=1, sticky=E)
def op_callback(*args):
if opp.get() != 'None':
dataframe2s.config(state=NORMAL)
df2box.config(state=NORMAL)
if opp.get() == 'keywords' or opp.get() == '%-diff':
df2branch.set('results')
elif opp.get() == 'None':
dataframe2s.config(state=DISABLED)
df2box.config(state=DISABLED)
# operation for editor
opp = StringVar(root)
opp.set('None')
operations = ('None', '%', u"\u00D7", u"\u00F7", '-', '+', 'combine', 'keywords', '%-diff', 'rel. dist.')
Label(editor_buttons, text='Operation and denominator', font=("Helvetica", 13, "bold")).grid(row=2, column=0, sticky=W, pady=(15,0))
ops = OptionMenu(editor_buttons, opp, *operations)
ops.grid(row=3, column=0, sticky=W)
opp.trace("w", op_callback)
# DF2 option for editor
tups = tuple(['Self'] + [i for i in list(all_interrogations.keys())])
data2_pick = StringVar(root)
data2_pick.set('Self')
#Label(tab2, text='Denominator:').grid(row=3, column=0, sticky=W)
dataframe2s = OptionMenu(editor_buttons, data2_pick, *tups)
dataframe2s.config(state=DISABLED, width=16)
dataframe2s.grid(row=3, column=0, columnspan=2, sticky='NW', padx=(110,0))
data2_pick.trace("w", df2_callback)
# DF2 branch selection
df2branch = StringVar(root)
df2branch.set('totals')
df2box = OptionMenu(editor_buttons, df2branch, 'results', 'totals')
df2box.config(state=DISABLED, width=11)
df2box.grid(row=3, column=1, sticky=E)
# sort by
Label(editor_buttons, text='Sort results by', font=("Helvetica", 13, "bold")).grid(row=4, column=0, sticky=W, pady=(15,0))
sort_val = StringVar(root)
sort_val.set('None')
poss = ['None', 'Total', 'Inverse total', 'Name','Increase',
'Decrease', 'Static', 'Turbulent', 'P value', 'Reverse']
sorts = OptionMenu(editor_buttons, sort_val, *poss)
sorts.config(width=11)
sorts.grid(row=4, column=1, sticky=E, pady=(15,0))
# spelling again
Label(editor_buttons, text='Spelling:').grid(row=5, column=0, sticky=W, pady=(15,0))
spl_editor = MyOptionMenu(editor_buttons, 'Off','UK','US')
spl_editor.grid(row=5, column=1, sticky=E, pady=(15,0))
spl_editor.configure(width=10)
# keep_top
Label(editor_buttons, text='Keep top results:').grid(row=6, column=0, sticky=W)
keeptopnum = StringVar()
keeptopnum.set('all')
keeptopbox = Entry(editor_buttons, textvariable=keeptopnum, width=5)
keeptopbox.grid(column=1, row=6, sticky=E)
all_text_widgets.append(keeptopbox)
# currently broken: just totals button
just_tot_setting = IntVar()
just_tot_but = Checkbutton(editor_buttons, text="Just totals", variable=just_tot_setting, state=DISABLED)
#just_tot_but.select()
just_tot_but.grid(column=0, row=7, sticky=W)
keep_stats_setting = IntVar()
keep_stat_but = Checkbutton(editor_buttons, text="Keep stats", variable=keep_stats_setting)
#keep_stat_but.select()
keep_stat_but.grid(column=1, row=7, sticky=E)
rem_abv_p_set = IntVar()
rem_abv_p_but = Checkbutton(editor_buttons, text="Remove above p", variable=rem_abv_p_set)
#rem_abv_p_but.select()
rem_abv_p_but.grid(column=0, row=8, sticky=W)
# transpose
transpose = IntVar()
trans_but = Checkbutton(editor_buttons, text="Transpose", variable=transpose, onvalue=True, offvalue=False)
trans_but.grid(column=1, row=8, sticky=E)
# entries + entry field for regex, off, skip, keep, merge
Label(editor_buttons, text='Edit entries', font=("Helvetica", 13, "bold")).grid(row=9, column=0, sticky=W, pady=(15, 0))
# edit entries regex box
entry_regex = StringVar()
entry_regex.set(r'.*ing$')
edit_box = Entry(editor_buttons, textvariable=entry_regex, width=23, state=DISABLED, font=("Courier New", 13))
edit_box.grid(row=10, column=1, sticky=E)
all_text_widgets.append(edit_box)
# merge entries newname
Label(editor_buttons, text='Merge name:').grid(row=11, column=0, sticky=W)
newname_var = StringVar()
newname_var.set('')
mergen = Entry(editor_buttons, textvariable=newname_var, width=23, state=DISABLED, font=("Courier New", 13))
mergen.grid(row=11, column=1, sticky=E)
all_text_widgets.append(mergen)
Label(editor_buttons, text='Replace in entry names:').grid(row=12, column=0, sticky=W)
Label(editor_buttons, text='Replace with:').grid(row=12, column=1, sticky=W)
toreplace_string = StringVar()
toreplace_string.set('')
replacewith_string = StringVar()
replacewith_string.set('')
toreplace = Entry(editor_buttons, textvariable=toreplace_string, font=("Courier New", 13))
toreplace.grid(row=13, column=0, sticky=W)
all_text_widgets.append(toreplace)
replacewith = Entry(editor_buttons, textvariable=replacewith_string, font=("Courier New", 13), width=23)
replacewith.grid(row=13, column=1, sticky=E)
all_text_widgets.append(replacewith)
def do_w_callback(*args):
"""if not merging entries, diable input fields"""
if do_with_entries.get() != 'Off':
edit_box.configure(state=NORMAL)
else:
edit_box.configure(state=DISABLED)
if do_with_entries.get() == 'Merge':
mergen.configure(state=NORMAL)
else:
mergen.configure(state=DISABLED)
# options for editing entries
do_with_entries = StringVar(root)
do_with_entries.set('Off')
edit_ent_op = ('Off', 'Skip', 'Keep', 'Merge')
ed_op = OptionMenu(editor_buttons, do_with_entries, *edit_ent_op)
ed_op.grid(row=10, column=0, sticky=W)
do_with_entries.trace("w", do_w_callback)
def onselect_subc(evt):
"""get selected subcorpora: this probably doesn't need to be
a callback, as they are only needed during do_edit"""
# clear any previously selected subcorpora (popping while iterating skips items)
del subc_sel_vals[:]
wx = evt.widget
indices = wx.curselection()
for index in indices:
value = wx.get(index)
if value not in subc_sel_vals:
subc_sel_vals.append(value)
def do_s_callback(*args):
"""hide subcorpora edit options if off"""
if do_sub.get() != 'Off':
pass
#subc_listbox.configure(state=NORMAL)
else:
pass
#subc_listbox.configure(state=DISABLED)
if do_sub.get() == 'Merge':
merge.configure(state=NORMAL)
else:
merge.configure(state=DISABLED)
# subcorpora + optionmenu off, skip, keep
Label(editor_buttons, text='Edit subcorpora', font=("Helvetica", 13, "bold")).grid(row=14, column=0, sticky=W, pady=(15,0))
edit_sub_f = Frame(editor_buttons)
edit_sub_f.grid(row=14, column=1, rowspan = 5, sticky=E, pady=(20,0))
edsub_scbr = Scrollbar(edit_sub_f)
edsub_scbr.pack(side=RIGHT, fill=Y)
subc_listbox = Listbox(edit_sub_f, selectmode = EXTENDED, height=5, relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=edsub_scbr.set, exportselection=False)
subc_listbox.pack(fill=BOTH)
edsub_scbr.config(command=subc_listbox.yview)
xx = subc_listbox.bind('<<ListboxSelect>>', onselect_subc)
subc_listbox.select_set(0)
# subcorpora edit options
do_sub = StringVar(root)
do_sub.set('Off')
do_with_subc = OptionMenu(editor_buttons, do_sub, *('Off', 'Skip', 'Keep', 'Merge', 'Span'))
do_with_subc.grid(row=15, column=0, sticky=W)
do_sub.trace("w", do_s_callback)
# subcorpora merge name
Label(editor_buttons, text='Merge name:').grid(row=16, column=0, sticky='NW')
new_subc_name = StringVar()
new_subc_name.set('')
merge = Entry(editor_buttons, textvariable=new_subc_name, state=DISABLED, font=("Courier New", 13))
merge.grid(row=17, column=0, sticky='SW', pady=(0, 10))
all_text_widgets.append(merge)
# name the edit
edit_nametext=StringVar()
edit_nametext.set('untitled')
Label(editor_buttons, text='Edit name', font=("Helvetica", 13, "bold")).grid(row=19, column=0, sticky=W)
msn = Entry(editor_buttons, textvariable=edit_nametext, width=18)
msn.grid(row=20, column=0, sticky=W)
all_text_widgets.append(msn)
# edit button
edbut = Button(editor_buttons, text='Edit')
edbut.config(command=lambda: runner(edbut, do_editing), state=DISABLED)
edbut.grid(row=20, column=1, sticky=E)
def editor_spreadsheet_showing_something(*args):
"""if there is anything in an editor window, allow spreadsheet edit button"""
if name_of_o_ed_spread.get():
upd_ed_but.config(state=NORMAL)
else:
upd_ed_but.config(state=DISABLED)
# show spreadsheets
e_wdth = int(note_width * 0.55)
editor_sheets = Frame(tab2)
editor_sheets.grid(column=1, row=0, sticky='NE')
resultname = StringVar()
name_of_o_ed_spread = StringVar()
name_of_o_ed_spread.set('')
name_of_o_ed_spread.trace("w", editor_spreadsheet_showing_something)
resultname.set('Results to edit: %s' % str(name_of_o_ed_spread.get()))
o_editor_results = Frame(editor_sheets, height=28, width=20)
o_editor_results.grid(column=1, row=1, rowspan=1, padx=(20, 0), sticky=N)
Label(editor_sheets, textvariable=resultname,
font=("Helvetica", 13, "bold")).grid(row=0,
column=1, sticky='NW', padx=(20,0))
#Label(editor_sheets, text='Totals to edit:',
#font=("Helvetica", 13, "bold")).grid(row=4,
#column=1, sticky=W, pady=0)
o_editor_totals = Frame(editor_sheets, height=1, width=20)
o_editor_totals.grid(column=1, row=1, rowspan=1, padx=(20,0), sticky=N, pady=(220,0))
update_spreadsheet(o_editor_results, df_to_show=None, height=160, width=e_wdth)
update_spreadsheet(o_editor_totals, df_to_show=None, height=10, width=e_wdth)
editoname = StringVar()
name_of_n_ed_spread = StringVar()
name_of_n_ed_spread.set('')
editoname.set('Edited results: %s' % str(name_of_n_ed_spread.get()))
Label(editor_sheets, textvariable=editoname,
font=("Helvetica", 13, "bold")).grid(row=1,
column=1, sticky='NW', padx=(20,0), pady=(290,0))
n_editor_results = Frame(editor_sheets, height=28, width=20)
n_editor_results.grid(column=1, row=1, rowspan=1, sticky=N, padx=(20,0), pady=(310,0))
#Label(editor_sheets, text='Edited totals:',
#font=("Helvetica", 13, "bold")).grid(row=15,
#column=1, sticky=W, padx=20, pady=0)
n_editor_totals = Frame(editor_sheets, height=1, width=20)
n_editor_totals.grid(column=1, row=1, rowspan=1, padx=(20,0), pady=(500,0))
update_spreadsheet(n_editor_results, df_to_show=None, height=160, width=e_wdth)
update_spreadsheet(n_editor_totals, df_to_show=None, height=10, width=e_wdth)
# add button to update
upd_ed_but = Button(editor_sheets, text='Update interrogation(s)', command=lambda: update_all_interrogations(pane='edit'))
if not small_screen:
upd_ed_but.grid(row=1, column=1, sticky=E, padx=(0, 40), pady=(594, 0))
else:
upd_ed_but.grid(row=0, column=1, sticky='NE', padx=(20,0))
upd_ed_but.config(state=DISABLED)
################# ################# ################# #################
# VISUALISE TAB # # VISUALISE TAB # # VISUALISE TAB # # VISUALISE TAB #
################# ################# ################# #################
plot_option_frame = Frame(tab3)
plot_option_frame.grid(row=0, column=0, sticky='NW')
def do_plotting():
"""when you press plot"""
plotbut.config(state=DISABLED)
# junk for showing the plot in tkinter
for i in oldplotframe:
i.destroy()
import matplotlib
matplotlib.use('TkAgg')
#from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# implement the default mpl key bindings
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from corpkit.plotter import plotter
if data_to_plot.get() == 'None':
timestring('No data selected to plot.')
return
if plotbranch.get() == 'results':
if all_interrogations[data_to_plot.get()].results is None:
timestring('No results branch to plot.')
return
what_to_plot = all_interrogations[data_to_plot.get()].results
elif plotbranch.get() == 'totals':
if all_interrogations[data_to_plot.get()].totals is None:
timestring('No totals branch to plot.')
return
what_to_plot = all_interrogations[data_to_plot.get()].totals
if single_entry.get() != 'All':
what_to_plot = what_to_plot[single_entry.get()]
if single_sbcp.get() != 'All':
what_to_plot = what_to_plot.ix[single_sbcp.get()]
if transpose_vis.get():
if plotbranch.get() != 'totals':
what_to_plot = what_to_plot.T
# determine num to plot
def determine_num_to_plot(num):
"""translate num to num_to_plot"""
try:
num = int(num)
except:
if num.lower() == 'all':
num = 'all'
else:
num = 7
number_to_plot.set('7')
return num
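# e.g. '15' -> 15 and 'all' -> 'all'; anything unparseable falls back to 7,
# and the entry box is reset to '7' to match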
num = determine_num_to_plot(number_to_plot.get())
the_kind = charttype.get()
if the_kind == 'Type of chart':
the_kind = 'line'
# plotter options
d = {'num_to_plot': num,
'kind': the_kind,
'indices': False}
if the_kind == 'heatmap':
d['robust'] = True
d['style'] = plot_style.get()
# explode option
if explbox.get() != '' and charttype.get() == 'pie':
if explbox.get().startswith('[') and explbox.get().endswith(']') and ',' in explbox.get():
explval = explbox.get().lstrip('[').rstrip(']').replace("'", '').replace('"', '').replace(' ', '').split(',')
else:
explval = explbox.get().strip()
explval = remake_special(explval, customs=custom_special_dict,
case_sensitive=case_sensitive.get())
d['explode'] = explval
# this code is ridiculous
d['tex'] = bool(texuse.get())
d['black_and_white'] = bool(bw.get())
d['reverse_legend'] = bool(rl.get())
d['subplots'] = bool(sbplt.get())
if bool(sbplt.get()):
d['layout'] = (int(lay1.get()), int(lay2.get()))
d['grid'] = bool(gridv.get())
d['stacked'] = bool(stackd.get())
d['partial_pie'] = bool(part_pie.get())
d['filled'] = bool(filledvar.get())
d['logx'] = bool(log_x.get())
d['logy'] = bool(log_y.get())
if x_axis_l.get() != '':
d['x_label'] = x_axis_l.get()
if x_axis_l.get() == 'None':
d['x_label'] = False
if y_axis_l.get() != '':
d['y_label'] = y_axis_l.get()
if y_axis_l.get() == 'None':
d['y_label'] = False
d['cumulative'] = bool(cumul.get())
d['colours'] = chart_cols.get()
legend_loc = legloc.get()
if legend_loc == 'none':
d['legend'] = False
else:
d['legend_pos'] = legend_loc
if showtot.get() == 'legend + plot':
d['show_totals'] = 'both'
else:
d['show_totals'] = showtot.get()
d['figsize'] = (int(figsiz1.get()), int(figsiz2.get()))
if len(what_to_plot.index) == 1:
what_to_plot = what_to_plot.ix[what_to_plot.index[0]]
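# with only one row left, a Series is passed instead of a one-row DataFrame,
# presumably so the plotter treats it as a single series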
if debug:
print('Plotter args:', what_to_plot, plotnametext.get(), d)
f = plotter(what_to_plot, plotnametext.get(), **d)
# latex error
#except RuntimeError as e:
# s = str(e)
# print(s)
# split_report = s.strip().split('Here is the full report generated by LaTeX:')
# try:
# if len(split_report) > 0 and split_report[1] != '':
# timestring('LaTeX error: %s' % split_report[1])
# except:
# timestring('LaTeX error: %s' % split_report)
# else:
# timestring('No TeX distribution found. Disabling TeX option.')
# texuse.set(0)
# tbut.config(state=DISABLED)
#
# return
timestring('%s plotted.' % plotnametext.get())
del oldplotframe[:]
def getScrollingCanvas(frame):
"""
Adds a new canvas with horizontal and vertical scroll bars to the given frame.
The frame itself is placed with grid; the canvas and scroll bars are packed inside it.
return: the newly created canvas
"""
frame.grid(column=1, row=0, rowspan = 1, padx=(15, 15), pady=(40, 0), columnspan=3, sticky='NW')
#frame.rowconfigure(0, weight=9)
#frame.columnconfigure(0, weight=9)
fig_frame_height = 440 if small_screen else 500
canvas = Canvas(frame, width=980, height=fig_frame_height)
xScrollbar = Scrollbar(frame, orient=HORIZONTAL)
yScrollbar = Scrollbar(frame)
xScrollbar.pack(side=BOTTOM,fill=X)
yScrollbar.pack(side=RIGHT,fill=Y)
canvas.config(xscrollcommand=xScrollbar.set)
xScrollbar.config(command=canvas.xview)
canvas.config(yscrollcommand=yScrollbar.set)
yScrollbar.config(command=canvas.yview)
canvas.pack(side=LEFT,expand=True,fill=BOTH)
return canvas
frame_for_fig = Frame(tab3)
#frame_for_fig
scrollC = getScrollingCanvas(frame_for_fig)
mplCanvas = FigureCanvasTkAgg(f.gcf(), frame_for_fig)
mplCanvas._tkcanvas.config(highlightthickness=0)
canvas = mplCanvas.get_tk_widget()
canvas.pack()
if frame_for_fig not in boxes:
boxes.append(frame_for_fig)
scrollC.create_window(0, 0, window=canvas)
scrollC.config(scrollregion=scrollC.bbox(ALL))
#hbar=Scrollbar(frame_for_fig,orient=HORIZONTAL)
#hbar.pack(side=BOTTOM,fill=X)
#hbar.config(command=canvas.get_tk_widget().xview)
#vbar=Scrollbar(frame_for_fig,orient=VERTICAL)
#vbar.pack(side=RIGHT,fill=Y)
#vbar.config(command=canvas.get_tk_widget().yview)
##canvas.config(width=300,height=300)
#canvas.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
#canvas.pack(side=LEFT,expand=True,fill=BOTH)
try:
mplCanvas.show()
except RuntimeError as e:
s = str(e)
print(s)
split_report = s.strip().split('Here is the full report generated by LaTeX:')
if len(split_report) > 1 and split_report[1] != '':
timestring('LaTeX error: %s' % split_report[1])
else:
timestring('No TeX distribution found. Disabling TeX option.')
texuse.set(0)
tbut.config(state=DISABLED)
return
oldplotframe.append(mplCanvas.get_tk_widget())
del thefig[:]
toolbar_frame = Frame(tab3, borderwidth=0)
toolbar_frame.grid(row=0, column=1, columnspan=3, sticky='NW', padx=(400,0), pady=(600,0))
toolbar_frame.lift()
oldplotframe.append(toolbar_frame)
toolbar = NavigationToolbar2TkAgg(mplCanvas,toolbar_frame)
toolbar.update()
thefig.append(f.gcf())
savedplot.set('Saved image: ')
images = {'the_current_fig': -1}
def move(direction='forward'):
import os
try:
from PIL import Image
from PIL import ImageTk
except ImportError:
timestring("You need PIL/Pillow installed to do this.")
return
for i in oldplotframe:
i.destroy()
del oldplotframe[:]
# maybe sort by date added?
image_list = [i for i in all_images]
if len(image_list) == 0:
timestring('No images found in images folder.')
return
# figure out where we're up to
if images['the_current_fig'] != -1:
ind = image_list.index(images['the_current_fig'])
else:
ind = -1
if direction == 'forward':
newind = ind + 1
else:
newind = ind - 1
if newind < 1:
pbut.configure(state=DISABLED)
else:
pbut.configure(state=NORMAL)
if newind + 1 == len(image_list):
nbut.configure(state=DISABLED)
else:
nbut.configure(state=NORMAL)
imf = image_list[newind]
if not imf.endswith('.png'):
imf = imf + '.png'
image = Image.open(os.path.join(image_fullpath.get(), imf))
image_to_measure = ImageTk.PhotoImage(image)
old_height=image_to_measure.height()
old_width=image_to_measure.width()
def determine_new_dimensions(height, width):
maxh = 500
maxw = 1000
diff = float(height) / float(width)
if diff > 1:
# make height max
newh = maxh
# figure out level of magnification
prop = maxh / float(height)
neww = width * prop
elif diff < 1:
neww = maxw
prop = maxw / float(width)
newh = height * prop
elif diff == 1:
newh = maxh
neww = maxw
return (int(neww), int(newh))
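# e.g. a 2000x1000 (width x height) image has diff 0.5, so it is fitted to the
# 1000px width limit and comes out as (1000, 500); portrait images are fitted
# to the 500px height limit instead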
# calculate new dimensions
newdimensions = determine_new_dimensions(old_height, old_width)
# determine left padding
padxright = 20
if newdimensions[0] != 1000:
padxleft = ((1000 - newdimensions[0]) / 2) + 40
else:
padxleft = 40
padytop = (500 - newdimensions[1]) / 2
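# e.g. a resized width of 800 gives ((1000 - 800) / 2) + 40 = 140px of left
# padding, roughly centring the image in the canvas area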
def makezero(n):
if n < 0:
return 0
else:
return n
padxright = makezero(padxright)
padxleft = makezero(padxleft)
padytop = makezero(padytop)
image = image.resize(newdimensions)
image = ImageTk.PhotoImage(image)
frm = Frame(tab3, height=500, width=1000)
frm.grid(column=1, row=0, rowspan = 1, padx=(padxleft, padxright), \
pady=padytop, columnspan=3)
gallframe = Label(frm, image = image, justify=CENTER)
gallframe.pack(anchor='center', fill=BOTH)
oldplotframe.append(frm)
images[image_list[newind]] = image
images['the_current_fig'] = image_list[newind]
savedplot.set('Saved image: %s' % os.path.splitext(image_list[newind])[0])
timestring('Viewing %s' % os.path.splitext(image_list[newind])[0])
savedplot = StringVar()
savedplot.set('View saved images: ')
tmp = Label(tab3, textvariable=savedplot, font=("Helvetica", 13, "bold"))
padding = 555 if small_screen else 616
tmp.grid(row=0, column=1, padx=(40,0), pady=(padding-50,0), sticky=W)
pbut = Button(tab3, text='Previous', command=lambda: move(direction='back'))
pbut.grid(row=0, column=1, padx=(40,0), pady=(padding, 0), sticky=W)
pbut.config(state=DISABLED)
nbut = Button(tab3, text='Next', command=lambda: move(direction = 'forward'))
nbut.grid(row=0, column=1, padx=(160,0), pady=(padding, 0), sticky=W)
nbut.config(state=DISABLED)
# not in use while using the toolbar instead...
#def save_current_image():
# import os
# # figure out filename
# filename = namer(plotnametext.get(), type_of_data = 'image') + '.png'
# import sys
# defaultextension = '.png' if sys.platform == 'darwin' else ''
# kwarg = {'defaultextension': defaultextension,
# #'filetypes': [('all files', '.*'),
# #('png file', '.png')],
# 'initialfile': filename}
# imagedir = image_fullpath.get()
# if imagedir:
# kwarg['initialdir'] = imagedir
# fo = tkFileDialog.asksaveasfilename(**kwarg)
# if fo is None: # asksaveasfile return `None` if dialog closed with "cancel".
# return
# thefig[0].savefig(os.path.join(image_fullpath.get(), fo))
# timestring('%s saved to %s.' % (fo, image_fullpath.get()))
# title tab
Label(plot_option_frame, text='Image title:').grid(row=0, column=0, sticky='W', pady=(10, 0))
plotnametext=StringVar()
plotnametext.set('Untitled')
image_title_entry = Entry(plot_option_frame, textvariable=plotnametext)
image_title_entry.grid(row=0, column=1, pady=(10, 0))
all_text_widgets.append(image_title_entry)
def plot_callback(*args):
"""enable/disable based on selected dataset for plotting"""
if data_to_plot.get() == 'None':
plotbut.config(state=DISABLED)
else:
plotbut.config(state=NORMAL)
try:
thisdata = all_interrogations[data_to_plot.get()]
except KeyError:
return
single_entry.set('All')
single_sbcp.set('All')
subdrs = sorted(set([d for d in os.listdir(corpus_fullpath.get()) \
if os.path.isdir(os.path.join(corpus_fullpath.get(),d))]))
single_sbcp_optmenu.config(state=NORMAL)
single_sbcp_optmenu['menu'].delete(0, 'end')
single_sbcp_optmenu['menu'].add_command(label='All', command=_setit(single_sbcp, 'All'))
lst = []
if len(subdrs) > 0:
for c in subdrs:
lst.append(c)
single_sbcp_optmenu['menu'].add_command(label=c, command=_setit(single_sbcp, c))
single_entry_or_subcorpus['subcorpora'] = lst
else:
single_sbcp_optmenu.config(state=NORMAL)
single_sbcp_optmenu['menu'].delete(0, 'end')
single_sbcp_optmenu['menu'].add_command(label='All', command=_setit(single_sbcp, 'All'))
single_sbcp_optmenu.config(state=DISABLED)
if thisdata.results is not None:
plotbox.config(state=NORMAL)
single_ent_optmenu.config(state=NORMAL)
single_ent_optmenu['menu'].delete(0, 'end')
single_ent_optmenu['menu'].add_command(label='All', command=_setit(single_entry, 'All'))
lst = []
for corp in list(thisdata.results.columns)[:200]:
lst.append(corp)
single_ent_optmenu['menu'].add_command(label=corp, command=_setit(single_entry, corp))
single_entry_or_subcorpus['entries'] = lst
else:
single_ent_optmenu.config(state=NORMAL)
single_ent_optmenu['menu'].delete(0, 'end')
single_ent_optmenu['menu'].add_command(label='All', command=_setit(single_entry, 'All'))
single_ent_optmenu.config(state=DISABLED)
plotbox.config(state=NORMAL)
plotbranch.set('totals')
plotbox.config(state=DISABLED)
Label(plot_option_frame, text='Data to plot:').grid(row=1, column=0, sticky=W)
# select result to plot
data_to_plot = StringVar(root)
most_recent = all_interrogations[list(all_interrogations.keys())[-1]]
data_to_plot.set(most_recent)
every_interrogation = OptionMenu(plot_option_frame, data_to_plot, *tuple([i for i in list(all_interrogations.keys())]))
every_interrogation.config(width=20)
every_interrogation.grid(column=0, row=2, sticky=W, columnspan=2)
data_to_plot.trace("w", plot_callback)
Label(plot_option_frame, text='Entry:').grid(row=3, column=0, sticky=W)
single_entry = StringVar(root)
single_entry.set('All')
#most_recent = all_interrogations[all_interrogations.keys()[-1]]
#single_entry.set(most_recent)
single_ent_optmenu = OptionMenu(plot_option_frame, single_entry, *tuple(['']))
single_ent_optmenu.config(width=20, state=DISABLED)
single_ent_optmenu.grid(column=1, row=3, sticky=E)
def single_entry_plot_callback(*args):
"""turn off things if single entry selected"""
if single_entry.get() != 'All':
sbpl_but.config(state=NORMAL)
sbplt.set(0)
sbpl_but.config(state=DISABLED)
num_to_plot_box.config(state=NORMAL)
number_to_plot.set('1')
num_to_plot_box.config(state=DISABLED)
single_sbcp_optmenu.config(state=DISABLED)
entries = single_entry_or_subcorpus['entries']
if plotnametext.get() == 'Untitled' or plotnametext.get() in entries:
plotnametext.set(single_entry.get())
else:
plotnametext.set('Untitled')
sbpl_but.config(state=NORMAL)
number_to_plot.set('7')
num_to_plot_box.config(state=NORMAL)
single_sbcp_optmenu.config(state=NORMAL)
single_entry.trace("w", single_entry_plot_callback)
Label(plot_option_frame, text='Subcorpus:').grid(row=4, column=0, sticky=W)
single_sbcp = StringVar(root)
single_sbcp.set('All')
#most_recent = all_interrogations[all_interrogations.keys()[-1]]
#single_sbcp.set(most_recent)
single_sbcp_optmenu = OptionMenu(plot_option_frame, single_sbcp, *tuple(['']))
single_sbcp_optmenu.config(width=20, state=DISABLED)
single_sbcp_optmenu.grid(column=1, row=4, sticky=E)
def single_sbcp_plot_callback(*args):
"""turn off things if single entry selected"""
if single_sbcp.get() != 'All':
sbpl_but.config(state=NORMAL)
sbplt.set(0)
sbpl_but.config(state=DISABLED)
num_to_plot_box.config(state=NORMAL)
#number_to_plot.set('1')
#num_to_plot_box.config(state=DISABLED)
single_ent_optmenu.config(state=DISABLED)
charttype.set('bar')
entries = single_entry_or_subcorpus['subcorpora']
if plotnametext.get() == 'Untitled' or plotnametext.get() in entries:
plotnametext.set(single_sbcp.get())
else:
plotnametext.set('Untitled')
sbpl_but.config(state=NORMAL)
#number_to_plot.set('7')
num_to_plot_box.config(state=NORMAL)
single_ent_optmenu.config(state=NORMAL)
charttype.set('line')
single_sbcp.trace("w", single_sbcp_plot_callback)
# branch selection
plotbranch = StringVar(root)
plotbranch.set('results')
plotbox = OptionMenu(plot_option_frame, plotbranch, 'results', 'totals')
#plotbox.config(state=DISABLED)
plotbox.grid(row=2, column=0, sticky=E, columnspan=2)
def plotbranch_callback(*args):
if plotbranch.get() == 'totals':
single_sbcp_optmenu.config(state=DISABLED)
single_ent_optmenu.config(state=DISABLED)
sbpl_but.config(state=NORMAL)
sbplt.set(0)
sbpl_but.config(state=DISABLED)
trans_but_vis.config(state=NORMAL)
transpose_vis.set(0)
trans_but_vis.config(state=DISABLED)
else:
single_sbcp_optmenu.config(state=NORMAL)
single_ent_optmenu.config(state=NORMAL)
sbpl_but.config(state=NORMAL)
trans_but_vis.config(state=NORMAL)
plotbranch.trace('w', plotbranch_callback)
# num_to_plot
Label(plot_option_frame, text='Results to show:').grid(row=5, column=0, sticky=W)
number_to_plot = StringVar()
number_to_plot.set('7')
num_to_plot_box = Entry(plot_option_frame, textvariable=number_to_plot, width=3)
num_to_plot_box.grid(row=5, column=1, sticky=E)
all_text_widgets.append(num_to_plot_box)
def pie_callback(*args):
if charttype.get() == 'pie':
explbox.config(state=NORMAL)
ppie_but.config(state=NORMAL)
else:
explbox.config(state=DISABLED)
ppie_but.config(state=DISABLED)
if charttype.get().startswith('bar'):
#stackbut.config(state=NORMAL)
filledbut.config(state=NORMAL)
else:
#stackbut.config(state=DISABLED)
filledbut.config(state=DISABLED)
# can't do log y with area according to mpl
if charttype.get() == 'area':
logybut.deselect()
logybut.config(state=DISABLED)
filledbut.config(state=NORMAL)
else:
logybut.config(state=NORMAL)
filledbut.config(state=DISABLED)
# chart type
Label(plot_option_frame, text='Kind of chart').grid(row=6, column=0, sticky=W)
charttype = StringVar(root)
charttype.set('line')
kinds_of_chart = ('line', 'bar', 'barh', 'pie', 'area', 'heatmap')
chart_kind = OptionMenu(plot_option_frame, charttype, *kinds_of_chart)
chart_kind.config(width=10)
chart_kind.grid(row=6, column=1, sticky=E)
charttype.trace("w", pie_callback)
# axes
Label(plot_option_frame, text='x axis label:').grid(row=7, column=0, sticky=W)
x_axis_l = StringVar()
x_axis_l.set('')
tmp = Entry(plot_option_frame, textvariable=x_axis_l, font=("Courier New", 14), width=18)
tmp.grid(row=7, column=1, sticky=E)
all_text_widgets.append(tmp)
Label(plot_option_frame, text='y axis label:').grid(row=8, column=0, sticky=W)
y_axis_l = StringVar()
y_axis_l.set('')
tmp = Entry(plot_option_frame, textvariable=y_axis_l, font=("Courier New", 14), width=18)
tmp.grid(row=8, column=1, sticky=E)
all_text_widgets.append(tmp)
tmp = Label(plot_option_frame, text='Explode:')
if not small_screen:
tmp.grid(row=9, column=0, sticky=W)
explval = StringVar()
explval.set('')
explbox = Entry(plot_option_frame, textvariable=explval, font=("Courier New", 14), width=18)
if not small_screen:
explbox.grid(row=9, column=1, sticky=E)
all_text_widgets.append(explbox)
explbox.config(state=DISABLED)
# log options
log_x = IntVar()
Checkbutton(plot_option_frame, text="Log x axis", variable=log_x).grid(column=0, row=10, sticky=W)
log_y = IntVar()
logybut = Checkbutton(plot_option_frame, text="Log y axis", variable=log_y, width=13)
logybut.grid(column=1, row=10, sticky=E)
# transpose
transpose_vis = IntVar()
trans_but_vis = Checkbutton(plot_option_frame, text="Transpose", variable=transpose_vis, onvalue=True, offvalue=False, width=13)
trans_but_vis.grid(column=1, row=11, sticky=E)
cumul = IntVar()
cumulbutton = Checkbutton(plot_option_frame, text="Cumulative", variable=cumul, onvalue=True, offvalue=False)
cumulbutton.grid(column=0, row=11, sticky=W)
bw = IntVar()
Checkbutton(plot_option_frame, text="Black and white", variable=bw, onvalue=True, offvalue=False).grid(column=0, row=12, sticky=W)
texuse = IntVar()
tbut = Checkbutton(plot_option_frame, text="Use TeX", variable=texuse, onvalue=True, offvalue=False, width=13)
tbut.grid(column=1, row=12, sticky=E)
tbut.deselect()
if not py_script:
tbut.config(state=DISABLED)
rl = IntVar()
Checkbutton(plot_option_frame, text="Reverse legend", variable=rl, onvalue=True, offvalue=False).grid(column=0, row=13, sticky=W)
sbplt = IntVar()
sbpl_but = Checkbutton(plot_option_frame, text="Subplots", variable=sbplt, onvalue=True, offvalue=False, width=13)
sbpl_but.grid(column=1, row=13, sticky=E)
def sbplt_callback(*args):
"""if subplots are happening, allow layout"""
if sbplt.get():
lay1menu.config(state=NORMAL)
lay2menu.config(state=NORMAL)
else:
lay1menu.config(state=DISABLED)
lay2menu.config(state=DISABLED)
sbplt.trace("w", sbplt_callback)
gridv = IntVar()
gridbut = Checkbutton(plot_option_frame, text="Grid", variable=gridv, onvalue=True, offvalue=False)
gridbut.select()
gridbut.grid(column=0, row=14, sticky=W)
stackd = IntVar()
stackbut = Checkbutton(plot_option_frame, text="Stacked", variable=stackd, onvalue=True, offvalue=False, width=13)
stackbut.grid(column=1, row=14, sticky=E)
#stackbut.config(state=DISABLED)
part_pie = IntVar()
ppie_but = Checkbutton(plot_option_frame, text="Partial pie", variable=part_pie, onvalue=True, offvalue=False)
if not small_screen:
ppie_but.grid(column=0, row=15, sticky=W)
ppie_but.config(state=DISABLED)
filledvar = IntVar()
filledbut = Checkbutton(plot_option_frame, text="Filled", variable=filledvar, onvalue=True, offvalue=False, width=13)
if not small_screen:
filledbut.grid(column=1, row=15, sticky=E)
filledbut.config(state=DISABLED)
# chart type
Label(plot_option_frame, text='Colour scheme:').grid(row=16, column=0, sticky=W)
chart_cols = StringVar(root)
schemes = tuple(sorted(('Paired', 'Spectral', 'summer', 'Set1', 'Set2', 'Set3',
'Dark2', 'prism', 'RdPu', 'YlGnBu', 'RdYlBu', 'gist_stern', 'cool', 'coolwarm',
'gray', 'GnBu', 'gist_ncar', 'gist_rainbow', 'Wistia', 'CMRmap', 'bone',
'RdYlGn', 'spring', 'terrain', 'PuBu', 'spectral', 'rainbow', 'gist_yarg',
'BuGn', 'bwr', 'cubehelix', 'Greens', 'PRGn', 'gist_heat', 'hsv',
'Pastel2', 'Pastel1', 'jet', 'gist_earth', 'copper', 'OrRd', 'brg',
'gnuplot2', 'BuPu', 'Oranges', 'PiYG', 'YlGn', 'Accent', 'gist_gray', 'flag',
'BrBG', 'Reds', 'RdGy', 'PuRd', 'Blues', 'autumn', 'ocean', 'pink', 'binary',
'winter', 'gnuplot', 'hot', 'YlOrBr', 'seismic', 'Purples', 'RdBu', 'Greys',
'YlOrRd', 'PuOr', 'PuBuGn', 'nipy_spectral', 'afmhot',
'viridis', 'magma', 'plasma', 'inferno', 'diverge', 'default')))
ch_col = OptionMenu(plot_option_frame, chart_cols, *schemes)
ch_col.config(width=17)
ch_col.grid(row=16, column=1, sticky=E)
chart_cols.set('viridis')
# style
from matplotlib import style
try:
stys = tuple(style.available)
except:
stys = tuple(('ggplot', 'fivethirtyeight', 'bmh', 'matplotlib', \
'mpl-white', 'classic', 'seaborn-talk'))
plot_style = StringVar(root)
plot_style.set('ggplot')
Label(plot_option_frame, text='Plot style:').grid(row=17, column=0, sticky=W)
pick_a_style = OptionMenu(plot_option_frame, plot_style, *stys)
pick_a_style.config(width=17)
pick_a_style.grid(row=17, column=1, sticky=E)
def ps_callback(*args):
if plot_style.get().startswith('seaborn'):
chart_cols.set('Default')
ch_col.config(state=DISABLED)
else:
ch_col.config(state=NORMAL)
plot_style.trace("w", ps_callback)
# legend pos
Label(plot_option_frame, text='Legend position:').grid(row=18, column=0, sticky=W)
legloc = StringVar(root)
legloc.set('best')
locs = tuple(('best', 'upper right', 'right', 'lower right', 'lower left', 'upper left', 'middle', 'none'))
loc_options = OptionMenu(plot_option_frame, legloc, *locs)
loc_options.config(width=17)
loc_options.grid(row=18, column=1, sticky=E)
# figure size
Label(plot_option_frame, text='Figure size:').grid(row=19, column=0, sticky=W)
figsiz1 = StringVar(root)
figsiz1.set('10')
figsizes = tuple(('2', '4', '6', '8', '10', '12', '14', '16', '18'))
fig1 = OptionMenu(plot_option_frame, figsiz1, *figsizes)
fig1.configure(width=6)
fig1.grid(row=19, column=1, sticky=W, padx=(27, 0))
Label(plot_option_frame, text="x").grid(row=19, column=1, padx=(30, 0))
figsiz2 = StringVar(root)
figsiz2.set('4')
fig2 = OptionMenu(plot_option_frame, figsiz2, *figsizes)
fig2.configure(width=6)
fig2.grid(row=19, column=1, sticky=E)
# subplots layout
Label(plot_option_frame, text='Subplot layout:').grid(row=20, column=0, sticky=W)
lay1 = StringVar(root)
lay1.set('3')
figsizes = tuple([str(i) for i in range(1, 20)])
lay1menu = OptionMenu(plot_option_frame, lay1, *figsizes)
lay1menu.configure(width=6)
lay1menu.grid(row=20, column=1, sticky=W, padx=(27, 0))
Label(plot_option_frame, text="x").grid(row=20, column=1, padx=(30, 0))
lay2 = StringVar(root)
lay2.set('3')
lay2menu = OptionMenu(plot_option_frame, lay2, *figsizes)
lay2menu.configure(width=6)
lay2menu.grid(row=20, column=1, sticky=E)
lay1menu.config(state=DISABLED)
lay2menu.config(state=DISABLED)
# show_totals option
Label(plot_option_frame, text='Show totals: ').grid(row=21, column=0, sticky=W)
showtot = StringVar(root)
showtot.set('Off')
showtot_options = tuple(('Off', 'legend', 'plot', 'legend + plot'))
show_tot_menu = OptionMenu(plot_option_frame, showtot, *showtot_options)
show_tot_menu.grid(row=21, column=1, sticky=E)
# plot button
plotbut = Button(plot_option_frame, text='Plot')
plotbut.grid(row=22, column=1, sticky=E)
plotbut.config(command=lambda: runner(plotbut, do_plotting), state=DISABLED)
################### ################### ################### ###################
# CONCORDANCE TAB # # CONCORDANCE TAB # # CONCORDANCE TAB # # CONCORDANCE TAB #
################### ################### ################### ###################
def add_conc_lines_to_window(data, loading=False, preserve_colour=True):
import pandas as pd
import re
#pd.set_option('display.height', 1000)
#pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', 200)
import corpkit
from corpkit.interrogation import Concordance
if isinstance(data, Concordance):
current_conc[0] = data
elif isinstance(data, pd.core.frame.DataFrame):
data = Concordance(data)
current_conc[0] = data
else:
current_conc[0] = data.concordance
data = data.concordance
if win.get() == 'Window':
window = 70
else:
window = int(win.get())
fnames = show_filenames.get()
them = show_themes.get()
spk = show_speaker.get()
subc = show_subcorpora.get()
ix = show_index.get()
if not fnames:
data = data.drop('f', axis=1, errors='ignore')
if not them:
data = data.drop('t', axis=1, errors='ignore')
if not spk:
data = data.drop('s', axis=1, errors='ignore')
if not subc:
data = data.drop('c', axis=1, errors='ignore')
if not ix:
data = data.drop('i', axis=1, errors='ignore')
if them:
data = data.drop('t', axis=1, errors='ignore')
themelist = get_list_of_themes(data)
if any(t != '' for t in themelist):
data.insert(0, 't', themelist)
# only do left align when long result ...
# removed because it's no big deal if always left aligned, and this
# copes when people search for 'root' or something.
def resize_by_window_size(df, window):
import os
if 'f' in list(df.columns):
df['f'] = df['f'].apply(os.path.basename)
df['l'] = df['l'].str.slice(start=-window, stop=None)
df['l'] = df['l'].str.rjust(window)
df['r'] = df['r'].str.slice(start=0, stop=window)
df['r'] = df['r'].str.ljust(window)
df['m'] = df['m'].str.ljust(df['m'].str.len().max())
return df
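# each line is trimmed to the chosen window: the left context keeps only its
# last `window` characters (right-justified), the right context its first
# `window` characters (left-justified), and the match column is padded to a
# uniform width so the columns line up in the listbox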
moddata = resize_by_window_size(data, window)
lines = moddata.to_string(header=False, index=show_df_index.get()).splitlines()
#lines = [re.sub('\s*\.\.\.\s*$', '', s) for s in lines]
conclistbox.delete(0, END)
for line in lines:
conclistbox.insert(END, line)
if preserve_colour:
# itemcoldict has the NUMBER and COLOUR
index_regex = re.compile(r'^([0-9]+)')
# make dict for NUMBER:INDEX
index_dict = {}
lines = conclistbox.get(0, END)
for index, line in enumerate(lines):
index_dict[int(re.search(index_regex, conclistbox.get(index)).group(1))] = index
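# index_dict maps the number at the start of each displayed line (normally the
# concordance DataFrame index) to its listbox row, so colours applied earlier
# can be reattached after sorting or deleting lines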
todel = []
for item, colour in list(itemcoldict.items()):
try:
conclistbox.itemconfig(index_dict[item], {'bg':colour})
except KeyError:
todel.append(item)
for i in todel:
del itemcoldict[i]
if loading:
timestring('Concordances loaded.')
else:
timestring('Concordancing done: %d results.' % len(lines))
def delete_conc_lines(*args):
if type(current_conc[0]) == str:
return
items = conclistbox.curselection()
#current_conc[0].results.drop(current_conc[0].results.iloc[1,].name)
r = current_conc[0].drop([current_conc[0].iloc[int(n),].name for n in items])
add_conc_lines_to_window(r)
if len(items) == 1:
timestring('%d line removed.' % len(items))
if len(items) > 1:
timestring('%d lines removed.' % len(items))
global conc_saved
conc_saved = False
def delete_reverse_conc_lines(*args):
if type(current_conc[0]) == str:
return
items = [int(i) for i in conclistbox.curselection()]
# work out how many lines will go before the listbox is repopulated
removed = len(conclistbox.get(0, END)) - len(items)
r = current_conc[0].iloc[items,]
add_conc_lines_to_window(r)
conclistbox.select_set(0, END)
if removed == 1:
timestring('%d line removed.' % removed)
if removed > 1:
timestring('%d lines removed.' % removed)
global conc_saved
conc_saved = False
def conc_export(data='default'):
"""export conc lines to csv"""
import os
import pandas
if type(current_conc[0]) == str:
timestring('Nothing to export.')
return
if in_a_project.get() == 0:
home = os.path.expanduser("~")
docpath = os.path.join(home, 'Documents')
else:
docpath = project_fullpath.get()
if data == 'default':
thedata = current_conc[0]
thedata = thedata.to_csv(header = False, sep = '\t')
else:
thedata = all_conc[data]
thedata = thedata.to_csv(header = False, sep = '\t')
if sys.platform == 'darwin':
the_kwargs = {'message': 'Choose a name and place for your exported data.'}
else:
the_kwargs = {}
savepath = filedialog.asksaveasfilename(title='Save file',
initialdir=exported_fullpath.get(),
defaultextension='.csv',
initialfile='data.csv',
**the_kwargs)
if savepath == '':
return
with open(savepath, "w") as fo:
fo.write(thedata)
timestring('Concordance lines exported.')
global conc_saved
conc_saved = False
def get_list_of_colours(df):
flipped_colour={v: k for k, v in list(colourdict.items())}
colours = []
for i in list(df.index):
# if the item has been coloured
if i in list(itemcoldict.keys()):
itscolour=itemcoldict[i]
colournumber = flipped_colour[itscolour]
# append the number of the colour code, with some corrections
if colournumber == 0:
colournumber = 10
if colournumber == 9:
colournumber = 99
colours.append(colournumber)
else:
colours.append(10)
return colours
def get_list_of_themes(df):
flipped_colour={v: k for k, v in list(colourdict.items())}
themes = []
for i in list(df.index):
# if the item has been coloured
if i in list(itemcoldict.keys()):
itscolour=itemcoldict[i]
colournumber = flipped_colour[itscolour]
theme = entryboxes[list(entryboxes.keys())[colournumber]].get()
# append the theme name attached to this colour, if one was given
if theme is not False and theme != '':
themes.append(theme)
else:
themes.append('')
else:
themes.append('')
if all(i == '' for i in themes):
timestring('Warning: no scheme defined.')
return themes
def conc_sort(*args):
"""various sorting for conc, by updating dataframe"""
import re
import pandas
import itertools
sort_way = True
if isinstance(current_conc[0], str):
return
if prev_sortval[0] == sortval.get():
# if subcorpus is the same, etc, as well
sort_way = toggle()
df = current_conc[0]
prev_sortval[0] = sortval.get()
# sorting by first column is easy, so we don't need pandas
if sortval.get() == 'M1':
low = [l.lower() for l in df['m']]
df['tosorton'] = low
elif sortval.get() == 'File':
low = [l.lower() for l in df['f']]
df['tosorton'] = low
elif sortval.get() == 'Colour':
colist = get_list_of_colours(df)
df['tosorton'] = colist
elif sortval.get() == 'Scheme':
themelist = get_list_of_themes(df)
#df.insert(1, 't', themelist)
df.insert(1, 'tosorton', themelist)
elif sortval.get() == 'Index' or sortval.get() == 'Sort':
df = df.sort(ascending=sort_way)
elif sortval.get() == 'Subcorpus':
sbs = [l.lower() for l in df['c']]
df['tosorton'] = sbs
elif sortval.get() == 'Random':
import pandas
import numpy as np
df = df.reindex(np.random.permutation(df.index))
elif sortval.get() == 'Speaker':
try:
low = [l.lower() for l in df['s']]
except:
timestring('No speaker information to sort by.')
return
df['tosorton'] = low
# if sorting by other columns, however, it gets tough.
else:
td = {}
#if 'note' in kwargs.keys():
# td['note'] = kwargs['note']
# add_nltk_data_to_nltk_path(**td)
# tokenise the right part of each line
# get l or r column
col = sortval.get()[0].lower()
tokenised = [s.split() for s in list(df[col].values)]
if col == 'm':
repeats = 2
else:
repeats = 6
for line in tokenised:
for i in range(6 - len(line)):
if col == 'l':
line.insert(0, '')
if col == 'r':
line.append('')
# get 1-5 and convert it
num = int(sortval.get().lstrip('LMR'))
if col == 'l':
num = -num
if col == 'r':
num = num - 1
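# e.g. sorting by 'L3' gives num = 3, negated to -3 for the left context, i.e.
# the third token to the left of the match; 'R1' gives num = 0, the first
# token of the (padded) right context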
just_sortword = []
for l in tokenised:
if col != 'm':
just_sortword.append(l[num].lower())
else:
# horrible
if len(l) == 1:
just_sortword.append(l[0].lower())
elif len(l) > 1:
if num == 2:
just_sortword.append(l[1].lower())
elif num == -2:
just_sortword.append(l[-2].lower())
elif num == -1:
just_sortword.append(l[-1].lower())
# append list to df
df['tosorton'] = just_sortword
if sortval.get() not in ['Index', 'Random', 'Sort']:
df = df.sort(['tosorton'], ascending=sort_way)
df = df.drop(['tosorton'], axis=1, errors='ignore')
if show_filenames.get() == 0:
add_conc_lines_to_window(df.drop('f', axis=1, errors='ignore'))
else:
add_conc_lines_to_window(df)
timestring('%d concordance lines sorted.' % len(conclistbox.get(0, END)))
global conc_saved
conc_saved = False
def do_inflection(pos='v'):
global tb
from corpkit.dictionaries.process_types import get_both_spellings, add_verb_inflections
# get every word
all_words = [w.strip().lower() for w in tb.get(1.0, END).split()]
# try to get just selection
cursel = False
try:
lst = [w.strip().lower() for w in tb.get(SEL_FIRST, SEL_LAST).split()]
cursel = True
except:
lst = [w.strip().lower() for w in tb.get(1.0, END).split()]
lst = get_both_spellings(lst)
if pos == 'v':
expanded = add_verb_inflections(lst)
if pos == 'n':
from corpkit.inflect import pluralize
expanded = []
for w in lst:
expanded.append(w)
pl = pluralize(w)
if pl != w:
expanded.append(pl)
if pos == 'a':
from corpkit.inflect import grade
expanded = []
for w in lst:
expanded.append(w)
comp = grade(w, suffix = "er")
if comp != w:
expanded.append(comp)
supe = grade(w, suffix = "est")
if supe != w:
expanded.append(supe)
if cursel:
expanded = expanded + all_words
lst = sorted(set(expanded))
# delete widget text, reinsert all
tb.delete(1.0, END)
for w in lst:
tb.insert(END, w + '\n')
def make_dict_from_existing_wordlists():
from collections import namedtuple
def convert(dictionary):
return namedtuple('outputnames', list(dictionary.keys()))(**dictionary)
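# convert() turns a plain dict into a namedtuple instance, so stored wordlists
# can be accessed as attributes as well as by key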
all_preset_types = {}
from corpkit.dictionaries.process_types import processes
from corpkit.dictionaries.roles import roles
from corpkit.dictionaries.wordlists import wordlists
from corpkit.other import as_regex
customs = convert(custom_special_dict)
special_qs = [processes, roles, wordlists]
for kind in special_qs:
try:
types = [k for k in list(kind.__dict__.keys())]
except AttributeError:
types = [k for k in list(kind._asdict().keys())]
for t in types:
if kind == roles:
all_preset_types[t.upper() + '_ROLE'] = kind._asdict()[t]
else:
try:
all_preset_types[t.upper()] = kind.__dict__[t]
except AttributeError:
all_preset_types[t.upper()] = kind._asdict()[t]
return all_preset_types
predict = make_dict_from_existing_wordlists()
for k, v in list(predict.items()):
custom_special_dict[k.upper()] = v
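# the preset lists from corpkit's dictionaries are now available under their
# uppercased names (roles get a '_ROLE' suffix), alongside user-defined lists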
def store_wordlist():
global tb
lst = [w.strip().lower() for w in tb.get(1.0, END).split()]
global schemename
if schemename.get() == '<Enter a name>':
timestring('Wordlist needs a name.')
return
specname = ''.join([i for i in schemename.get().upper() if i.isalnum() or i == '_'])
if specname in list(predict.keys()):
timestring('Name "%s" already taken, sorry.' % specname)
return
else:
if specname in list(custom_special_dict.keys()):
should_continue = messagebox.askyesno("Overwrite list",
"Overwrite existing list named '%s'?" % specname)
if not should_continue:
return
custom_special_dict[specname] = lst
global cust_spec
cust_spec.delete(0, END)
for k, v in sorted(custom_special_dict.items()):
cust_spec.insert(END, k)
color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
timestring('LIST:%s stored to custom wordlists.' % specname)
parser_opts = StringVar()
speakseg = IntVar()
parse_with_metadata = IntVar()
tokenise_pos = IntVar()
tokenise_lem = IntVar()
clicked_done = IntVar()
clicked_done.set(0)
def parser_options(kind):
"""
A popup with corenlp options, to display before parsing.
this is a good candidate for 'preferences'
"""
from tkinter import Toplevel
global poptions
poptions = Toplevel()
poptions.title('Parser options')
from collections import OrderedDict
popt = OrderedDict()
if kind == 'parse':
tups = [('Tokenise', 'tokenize'),
('Clean XML', 'cleanxml'),
('Sentence splitting', 'ssplit'),
('POS tagging', 'pos'),
('Lemmatisation', 'lemma'),
('Named entity recognition', 'ner'),
('Parse', 'parse'),
('Referent tracking', 'dcoref')]
for k, v in tups:
popt[k] = v
butvar = {}
butbut = {}
orders = {'tokenize': 0,
'cleanxml': 1,
'ssplit': 2,
'pos': 3,
'lemma': 4,
'ner': 5,
'parse': 6,
'dcoref': 7}
for index, (k, v) in enumerate(popt.items()):
tmp = StringVar()
but = Checkbutton(poptions, text=k, variable=tmp, onvalue=v, offvalue=False)
but.grid(sticky=W)
if k != 'Clean XML':
but.select()
else:
but.deselect()
butbut[index] = but
butvar[index] = tmp
if kind == 'tokenise':
Checkbutton(poptions, text='POS tag', variable=tokenise_pos, onvalue=True, offvalue=False).grid(sticky=W)
Checkbutton(poptions, text='Lemmatise', variable=tokenise_lem, onvalue=True, offvalue=False).grid(sticky=W)
Checkbutton(poptions, text='Speaker segmentation', variable=speakseg, onvalue=True, offvalue=False).grid(sticky=W)
Checkbutton(poptions, text='XML metadata', variable=parse_with_metadata, onvalue=True, offvalue=False).grid(sticky=W)
def optionspicked(*args):
vals = [i.get() for i in list(butvar.values()) if i.get() is not False and i.get() != 0 and i.get() != '0']
vals = sorted(vals, key=lambda x:orders[x])
the_opts = ','.join(vals)
clicked_done.set(1)
poptions.destroy()
parser_opts.set(the_opts)
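# the_opts ends up as a comma-separated annotator string in dependency order,
# e.g. 'tokenize,ssplit,pos,lemma,ner,parse,dcoref' with the default selections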
def qut():
poptions.destroy()
stopbut = Button(poptions, text='Cancel', command=qut)
stopbut.grid(row=15, sticky='w', padx=5)
stopbut = Button(poptions, text='Done', command=optionspicked)
stopbut.grid(row=15, sticky='e', padx=5)
############## ############## ############## ############## ##############
# WORDLISTS # # WORDLISTS # # WORDLISTS # # WORDLISTS # # WORDLISTS #
############## ############## ############## ############## ##############
def custom_lists():
"""a popup for defining custom wordlists"""
from tkinter import Toplevel
popup = Toplevel()
popup.title('Custom wordlists')
popup.wm_attributes('-topmost', 1)
Label(popup, text='Create wordlist', font=("Helvetica", 13, "bold")).grid(column=0, row=0)
global schemename
schemename = StringVar()
schemename.set('<Enter a name>')
scheme_name_field = Entry(popup, textvariable=schemename, justify=CENTER, width=21, font=("Courier New", 13))
#scheme_name_field.bind('<Button-1>', select_all_text)
scheme_name_field.grid(column=0, row=5, sticky=W, padx=(7, 0))
global tb
custom_words = Frame(popup, width=9, height=40)
custom_words.grid(row=1, column=0, padx=5)
cwscrbar = Scrollbar(custom_words)
cwscrbar.pack(side=RIGHT, fill=Y)
tb = Text(custom_words, yscrollcommand=cwscrbar.set, relief=SUNKEN,
bg='#F4F4F4', width=20, height=26, font=("Courier New", 13))
cwscrbar.config(command=tb.yview)
bind_textfuncts_to_widgets([tb, scheme_name_field])
tb.pack(side=LEFT, fill=BOTH)
tmp = Button(popup, text='Get verb inflections', command=lambda: do_inflection(pos = 'v'), width=17)
tmp.grid(row=2, column=0, sticky=W, padx=(7, 0))
tmp = Button(popup, text='Get noun inflections', command=lambda: do_inflection(pos = 'n'), width=17)
tmp.grid(row=3, column=0, sticky=W, padx=(7, 0))
tmp = Button(popup, text='Get adjective forms', command=lambda: do_inflection(pos = 'a'), width=17)
tmp.grid(row=4, column=0, sticky=W, padx=(7, 0))
#Button(text='Inflect as noun', command=lambda: do_inflection(pos = 'n')).grid()
savebut = Button(popup, text='Store', command=store_wordlist, width=17)
savebut.grid(row=6, column=0, sticky=W, padx=(7, 0))
Label(popup, text='Previous wordlists', font=("Helvetica", 13, "bold")).grid(column=1, row=0, padx=15)
other_custom_queries = Frame(popup, width=9, height=30)
other_custom_queries.grid(row=1, column=1, padx=15)
pwlscrbar = Scrollbar(other_custom_queries)
pwlscrbar.pack(side=RIGHT, fill=Y)
global cust_spec
cust_spec = Listbox(other_custom_queries, selectmode = EXTENDED, height=24, relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=pwlscrbar.set, exportselection=False, width=20,
font=("Courier New", 13))
pwlscrbar.config(command=cust_spec.yview)
cust_spec.pack()
cust_spec.delete(0, END)
def colour_the_custom_queries(*args):
color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
cust_spec.bind('<<Modified>>', colour_the_custom_queries)
for k, v in sorted(custom_special_dict.items()):
cust_spec.insert(END, k)
colour_the_custom_queries()
def remove_this_custom_query():
global cust_spec
indexes = cust_spec.curselection()
for index in indexes:
name = cust_spec.get(index)
del custom_special_dict[name]
cust_spec.delete(0, END)
for k, v in sorted(custom_special_dict.items()):
cust_spec.insert(END, k)
color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
if len(indexes) == 1:
timestring('%s forgotten.' % name)
else:
timestring('%d lists forgotten.' % len(indexes))
def delete_this_custom_query():
global cust_spec
indexes = cust_spec.curselection()
for index in indexes:
name = cust_spec.get(index)
if name in list(predict.keys()):
timestring("%s can't be permanently deleted." % name)
return
del custom_special_dict[name]
try:
del saved_special_dict[name]
except:
pass
dump_custom_list_json()
cust_spec.delete(0, END)
for k, v in sorted(custom_special_dict.items()):
cust_spec.insert(END, k)
color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
if len(indexes) == 1:
timestring('%s permanently deleted.' % name)
else:
timestring('%d lists permanently deleted.' % len(indexes))
def show_this_custom_query(*args):
global cust_spec
index = cust_spec.curselection()
if len(index) > 1:
timestring("Can only show one list at a time.")
return
name = cust_spec.get(index)
tb.delete(1.0, END)
for i in custom_special_dict[name]:
tb.insert(END, i + '\n')
schemename.set(name)
cust_spec.bind('<Return>', show_this_custom_query)
def merge_this_custom_query(*args):
global cust_spec
indexes = cust_spec.curselection()
names = [cust_spec.get(i) for i in indexes]
tb.delete(1.0, END)
for name in names:
for i in custom_special_dict[name]:
tb.insert(END, i + '\n')
schemename.set('Merged')
def add_custom_query_to_json():
global cust_spec
indexes = cust_spec.curselection()
for index in indexes:
name = cust_spec.get(index)
saved_special_dict[name] = custom_special_dict[name]
dump_custom_list_json()
color_saved(cust_spec, colour1 = '#ccebc5', colour2 = '#fbb4ae', lists = True)
if len(indexes) == 1:
timestring('%s saved to file.' % name)
else:
timestring('%d lists saved to file.' % len(indexes))
Button(popup, text='View/edit', command=show_this_custom_query, width=17).grid(column=1, row=2, sticky=E, padx=(0, 7))
Button(popup, text='Merge', command=merge_this_custom_query, width=17).grid(column=1, row=3, sticky=E, padx=(0, 7))
svb = Button(popup, text='Save', command=add_custom_query_to_json, width=17)
svb.grid(column=1, row=4, sticky=E, padx=(0, 7))
if in_a_project.get() == 0:
svb.config(state=DISABLED)
else:
svb.config(state=NORMAL)
Button(popup, text='Remove', command=remove_this_custom_query, width=17).grid(column=1, row=5, sticky=E, padx=(0, 7))
Button(popup, text='Delete', command=delete_this_custom_query, width=17).grid(column=1, row=6, sticky=E, padx=(0, 7))
def have_unsaved_list():
"""finds out if there is an unsaved list"""
global tb
lst = [w.strip().lower() for w in tb.get(1.0, END).split()]
if any(lst == l for l in list(custom_special_dict.values())):
return False
else:
return True
def quit_listing(*args):
if have_unsaved_list():
should_continue = messagebox.askyesno("Unsaved data",
"Unsaved list will be forgotten. Continue?")
if not should_continue:
return
popup.destroy()
stopbut = Button(popup, text='Done', command=quit_listing)
stopbut.grid(column=0, columnspan=2, row=7, pady=7)
############## ############## ############## ############## ##############
# COLSCHEMES # # COLSCHEMES # # COLSCHEMES # # COLSCHEMES # # COLSCHEMES #
############## ############## ############## ############## ##############
# a place for the toplevel entry info
entryboxes = OrderedDict()
# fill it with null data
for i in range(10):
tmp = StringVar()
tmp.set('')
entryboxes[i] = tmp
def codingschemer():
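# popup for naming the 0-9 colour codes used when coding concordance lines;
# the names live in the module-level 'entryboxes' StringVars defined above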
try:
global toplevel
toplevel.destroy()
except:
pass
from tkinter import Toplevel
toplevel = Toplevel()
toplevel.geometry('+1089+85')
toplevel.title("Coding scheme")
toplevel.wm_attributes('-topmost', 1)
Label(toplevel, text='').grid(row=0, column=0, pady=2)
def quit_coding(*args):
toplevel.destroy()
#Label(toplevel, text=('When concordancing, you can colour code lines using 0-9 keys. '\
# 'If you name the colours here, you can export or save the concordance lines with '\
# 'names attached.'), font=('Helvetica', 13, 'italic'), wraplength = 250, justify=LEFT).grid(row=0, column=0, columnspan=2)
stopbut = Button(toplevel, text='Done', command=quit_coding)
stopbut.grid(row=12, column=0, columnspan=2, pady=15)
for index, colour_index in enumerate(colourdict.keys()):
Label(toplevel, text='Key: %d' % colour_index).grid(row=index + 1, column=0)
fore = 'black'
if colour_index == 9:
fore = 'white'
tmp = Entry(toplevel, textvariable=entryboxes[index], bg=colourdict[colour_index], fg = fore)
all_text_widgets.append(tmp)
if index == 0:
tmp.focus_set()
tmp.grid(row=index + 1, column=1, padx=10)
toplevel.bind("<Return>", quit_coding)
toplevel.bind("<Tab>", focus_next_window)
# conc box needs to be defined up here
fsize = IntVar()
fsize.set(12)
conc_height = 510 if small_screen else 565
cfrm = Frame(tab4, height=conc_height, width=note_width - 10)
cfrm.grid(column=0, row=0, sticky='nw')
cscrollbar = Scrollbar(cfrm)
cscrollbarx = Scrollbar(cfrm, orient=HORIZONTAL)
cscrollbar.pack(side=RIGHT, fill=Y)
cscrollbarx.pack(side=BOTTOM, fill=X)
conclistbox = Listbox(cfrm, yscrollcommand=cscrollbar.set, relief=SUNKEN, bg='#F4F4F4',
xscrollcommand=cscrollbarx.set, height=conc_height,
width=note_width - 10, font=('Courier New', fsize.get()),
selectmode = EXTENDED)
conclistbox.pack(fill=BOTH)
cscrollbar.config(command=conclistbox.yview)
cscrollbarx.config(command=conclistbox.xview)
cfrm.pack_propagate(False)
def dec_concfont(*args):
size = fsize.get()
fsize.set(size - 1)
conclistbox.configure(font=('Courier New', fsize.get()))
def inc_concfont(*args):
size = fsize.get()
fsize.set(size + 1)
conclistbox.configure(font=('Courier New', fsize.get()))
def select_all_conclines(*args):
conclistbox.select_set(0, END)
def color_conc(colour=0, *args):
"""color a conc line"""
import re
index_regex = re.compile(r'^([0-9]+)')
col = colourdict[colour]
if type(current_conc[0]) == str:
return
items = conclistbox.curselection()
for index in items:
conclistbox.itemconfig(index, {'bg':col})
ind = int(re.search(index_regex, conclistbox.get(index)).group(1))
itemcoldict[ind] = col
conclistbox.selection_clear(0, END)
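# keyboard shortcuts for the concordance pane: font size, select all,
# save/export/filename toggling, and the 0-9 keys for colour coding lines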
conclistbox.bind("<BackSpace>", delete_conc_lines)
conclistbox.bind("<Shift-KeyPress-BackSpace>", delete_reverse_conc_lines)
conclistbox.bind("<Shift-KeyPress-Tab>", conc_sort)
conclistbox.bind("<%s-minus>" % key, dec_concfont)
conclistbox.bind("<%s-equal>" % key, inc_concfont)
conclistbox.bind("<%s-a>" % key, select_all_conclines)
conclistbox.bind("<%s-s>" % key, lambda x: concsave())
conclistbox.bind("<%s-e>" % key, lambda x: conc_export())
conclistbox.bind("<%s-t>" % key, lambda x: toggle_filenames())
conclistbox.bind("<%s-A>" % key, select_all_conclines)
conclistbox.bind("<%s-S>" % key, lambda x: concsave())
conclistbox.bind("<%s-E>" % key, lambda x: conc_export())
conclistbox.bind("<%s-T>" % key, lambda x: toggle_filenames())
conclistbox.bind("0", lambda x: color_conc(colour=0))
conclistbox.bind("1", lambda x: color_conc(colour=1))
conclistbox.bind("2", lambda x: color_conc(colour=2))
conclistbox.bind("3", lambda x: color_conc(colour=3))
conclistbox.bind("4", lambda x: color_conc(colour=4))
conclistbox.bind("5", lambda x: color_conc(colour=5))
conclistbox.bind("6", lambda x: color_conc(colour=6))
conclistbox.bind("7", lambda x: color_conc(colour=7))
conclistbox.bind("8", lambda x: color_conc(colour=8))
conclistbox.bind("9", lambda x: color_conc(colour=9))
conclistbox.bind("0", lambda x: color_conc(colour=0))
# these were 'generate' and 'edit', but they look ugly right now. the spaces are nice though.
#lab = StringVar()
#lab.set('Concordancing: %s' % os.path.basename(corpus_fullpath.get()))
#Label(tab4, textvariable=lab, font=("Helvetica", 13, "bold")).grid(row=1, column=0, padx=20, pady=10, columnspan=5, sticky=W)
#Label(tab4, text=' ', font=("Helvetica", 13, "bold")).grid(row=1, column=9, columnspan=2)
conc_right_button_frame = Frame(tab4)
conc_right_button_frame.grid(row=1, column=0, padx=(10,0), sticky='N', pady=(5, 0))
# edit conc lines
conc_left_buts = Frame(conc_right_button_frame)
conc_left_buts.grid(row=1, column=0, columnspan=6, sticky='W')
Button(conc_left_buts, text='Delete selected', command=lambda: delete_conc_lines(), ).grid(row=0, column=0, sticky=W)
Button(conc_left_buts, text='Just selected', command=lambda: delete_reverse_conc_lines(), ).grid(row=0, column=1)
#Button(conc_left_buts, text='Sort', command=lambda: conc_sort()).grid(row=0, column=4)
def toggle_filenames(*args):
if isinstance(current_conc[0], str):
return
data = current_conc[0]
add_conc_lines_to_window(data)
def make_df_matching_screen():
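# rebuild a DataFrame containing only the rows still shown in the listbox,
# in their on-screen order, by parsing the leading index from each line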
import re
if type(current_conc[0]) == str:
return
df = current_conc[0]
if show_filenames.get() == 0:
df = df.drop('f', axis=1, errors = 'ignore')
if show_themes.get() == 0:
df = df.drop('t', axis=1, errors = 'ignore')
ix_to_keep = []
lines = conclistbox.get(0, END)
reg = re.compile(r'^\s*([0-9]+)')
for l in lines:
s = re.search(reg, l)
ix_to_keep.append(int(s.group(1)))
# reindex both selects and reorders the kept rows; avoids the deprecated .ix
df = df.reindex(ix_to_keep)
return df
def concsave():
name = simpledialog.askstring('Concordance name', 'Choose a name for your concordance lines:')
if not name or name == '':
return
df = make_df_matching_screen()
all_conc[name] = df
global conc_saved
conc_saved = True
refresh()
def merge_conclines():
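# concatenate the stored concordances selected in the listbox (plus the
# current one when only a single store is selected), optionally dropping
# duplicate left/middle/right lines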
toget = prev_conc_listbox.curselection()
should_continue = True
global conc_saved
if not conc_saved:
if type(current_conc[0]) != str and len(toget) > 1:
should_continue = messagebox.askyesno("Unsaved data",
"Unsaved concordance lines will be forgotten. Continue?")
else:
should_continue = True
if not should_continue:
return
import pandas
dfs = []
if toget != ():
if len(toget) < 2:
for item in toget:
nm = prev_conc_listbox.get(item)
dfs.append(all_conc[nm])
dfs.append(current_conc[0])
#timestring('Need multiple concordances to merge.' % name)
#return
for item in toget:
nm = prev_conc_listbox.get(item)
dfs.append(all_conc[nm])
else:
timestring('Nothing selected to merge.')
return
df = pandas.concat(dfs, ignore_index = True)
should_drop = messagebox.askyesno("Remove duplicates",
"Remove duplicate concordance lines?")
if should_drop:
df = df.drop_duplicates(subset = ['l', 'm', 'r'])
add_conc_lines_to_window(df)
def load_saved_conc():
should_continue = True
global conc_saved
if not conc_saved:
if type(current_conc[0]) != str:
should_continue = messagebox.askyesno("Unsaved data",
"Unsaved concordance lines will be forgotten. Continue?")
else:
should_continue = True
if should_continue:
toget = prev_conc_listbox.curselection()
if len(toget) > 1:
timestring('Only one selection allowed for load.')
return
if toget != ():
nm = prev_conc_listbox.get(toget[0])
df = all_conc[nm]
add_conc_lines_to_window(df, loading=True, preserve_colour=False)
else:
return
fourbuts = Frame(conc_right_button_frame)
fourbuts.grid(row=1, column=6, columnspan=1, sticky='E')
Button(fourbuts, text='Store as', command=concsave).grid(row=0, column=0)
Button(fourbuts, text='Remove', command= lambda: remove_one_or_more(window='conc', kind='concordance')).grid(row=0, column=1)
Button(fourbuts, text='Merge', command=merge_conclines).grid(row=0, column=2)
Button(fourbuts, text='Load', command=load_saved_conc).grid(row=0, column=3)
showbuts = Frame(conc_right_button_frame)
showbuts.grid(row=0, column=0, columnspan=6, sticky='w')
show_filenames = IntVar()
fnbut = Checkbutton(showbuts, text='Filenames', variable=show_filenames, command=toggle_filenames)
fnbut.grid(row=0, column=4)
#fnbut.select()
show_filenames.trace('w', toggle_filenames)
show_subcorpora = IntVar()
sbcrp = Checkbutton(showbuts, text='Subcorpora', variable=show_subcorpora, command=toggle_filenames)
sbcrp.grid(row=0, column=3)
sbcrp.select()
show_subcorpora.trace('w', toggle_filenames)
show_themes = IntVar()
themebut = Checkbutton(showbuts, text='Scheme', variable=show_themes, command=toggle_filenames)
themebut.grid(row=0, column=1)
#themebut.select()
show_themes.trace('w', toggle_filenames)
show_speaker = IntVar()
showspkbut = Checkbutton(showbuts, text='Speakers', variable=show_speaker, command=toggle_filenames)
showspkbut.grid(row=0, column=5)
#showspkbut.select()
show_speaker.trace('w', toggle_filenames)
show_index = IntVar()
show_s_w_ix = Checkbutton(showbuts, text='Index', variable=show_index, command=toggle_filenames)
#show_s_w_ix.select()
show_s_w_ix.grid(row=0, column=2)
show_index.trace('w', toggle_filenames)
show_df_index = IntVar()
indbut = Checkbutton(showbuts, text='#', variable=show_df_index, command=toggle_filenames)
indbut.grid(row=0, column=0)
indbut.select()
# disabling because turning index off can cause problems when sorting, etc
indbut.config(state=DISABLED)
show_df_index.trace('w', toggle_filenames)
interrobut_conc = Button(showbuts, text='Re-run')
interrobut_conc.config(command=lambda: runner(interrobut_conc, do_interrogation, conc = True), state=DISABLED)
interrobut_conc.grid(row=0, column=6, padx=(5,0))
annotation = False
txt_var = StringVar()
txt_var_r = StringVar()
def annotate_corpus():
"""
Allow the user to annotate the corpus
"""
anno_trans = {'Middle': 'm',
'Scheme': 't',
'Index': 'index',
'Colour': 'q'}
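# anno_trans presumably maps the option-menu labels to the concordance
# DataFrame column names expected by Corpus.annotate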
def allow_text(*args):
"""
If the user wants to add text as value, let him/her
"""
if anno_dec.get() == 'Custom':
txt_box_r.config(state=NORMAL)
else:
txt_box_r.config(state=DISABLED)
def go_action(*args):
"""
Do annotation
"""
from corpkit.corpus import Corpus
corp = Corpus(current_corpus.get(), print_info=False)
data = current_conc[0]
chosen = anno_dec.get()
# add colour and scheme to df
if chosen == 'Scheme':
themelist = get_list_of_themes(data)
if any(t != '' for t in themelist):
data.insert(0, 't', themelist)
elif chosen == 'Colour':
colourlist = get_list_of_colours(data)
if any(t != '' for t in colourlist):
data.insert(0, 'q', colourlist)
if chosen == 'Tag':
annotation = txt_box.get()
elif chosen == 'Custom':
field = txt_box.get()
value = txt_box_r.get()
annotation = {field: value}
else:
field = txt_box.get()
value = anno_trans.get(chosen, chosen)
annotation = {field: value}
if debug:
print('Annotation:', annotation)
corp.annotate(data, annotation, dry_run=False)
timestring('Annotation done.')
refresh_by_metadata()
anno_pop.destroy()
from tkinter import Toplevel
anno_pop = Toplevel()
#anno_pop.geometry('+400+40')
anno_pop.title("Annotate corpus")
anno_pop.wm_attributes('-topmost', 1)
#Label(anno_pop, text='Annotate with:').grid(row=1, column=0, sticky=W)
anno_dec = StringVar()
anno_dec.set('Middle')
annotype = ('Index', 'Position', 'Speaker', 'Colour', 'Scheme', 'Middle', 'Custom', 'Tag')
anno_lb = OptionMenu(anno_pop, anno_dec, *annotype)
anno_lb.grid(row=2, column=1, sticky=E)
Label(anno_pop, text='Field:').grid(row=1, column=0)
Label(anno_pop, text='Value:').grid(row=1, column=1)
txt_box = Entry(anno_pop, textvariable=txt_var, width=10)
all_text_widgets.append(txt_box)
txt_box.grid(row=2, column=0)
txt_box_r = Entry(anno_pop, textvariable=txt_var_r, width=22)
txt_box_r.config(state=DISABLED)
all_text_widgets.append(txt_box_r)
txt_box_r.grid(row=3, columnspan=2)
anno_dec.trace("w", allow_text)
do_anno = Button(anno_pop, text='Annotate', command=go_action)
do_anno.grid(row=4, columnspan=2)
def recalc(*args):
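# turn the current concordance into a new interrogation via .calculate(),
# store it under the chosen name and load it into the results spreadsheets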
import pandas as pd
name = simpledialog.askstring('New name', 'Choose a name for the data:')
if not name:
return
else:
out = current_conc[0].calculate()
all_interrogations[name] = out
name_of_interro_spreadsheet.set(name)
i_resultname.set('Interrogation results: %s' % str(name_of_interro_spreadsheet.get()))
totals_as_df = pd.DataFrame(out.totals, dtype=object)
if out.results is not None:
update_spreadsheet(interro_results, out.results, height=340)
subs = out.results.index
else:
update_spreadsheet(interro_results, df_to_show=None, height=340)
subs = out.totals.index
update_spreadsheet(interro_totals, totals_as_df, height=10)
ind = list(all_interrogations.keys()).index(name_of_interro_spreadsheet.get())
if ind == 0:
prev.configure(state=DISABLED)
else:
prev.configure(state=NORMAL)
if ind + 1 == len(list(all_interrogations.keys())):
nex.configure(state=DISABLED)
else:
nex.configure(state=NORMAL)
refresh()
subc_listbox.delete(0, 'end')
for e in list(subs):
if e != 'tkintertable-order':
subc_listbox.insert(END, e)
timestring('Calculation done. "%s" created.' % name)
note.change_tab(1)
recalc_but = Button(showbuts, text='Calculate', command=recalc)
recalc_but.config(command=recalc, state=DISABLED)
recalc_but.grid(row=0, column=7, padx=(5,0))
win = StringVar()
win.set('Window')
wind_size = OptionMenu(conc_left_buts, win, *tuple(('Window', '20', '30', '40',
'50', '60', '70', '80', '90', '100')))
wind_size.config(width=10)
wind_size.grid(row=0, column=5)
win.trace("w", conc_sort)
# possible sort
sort_vals = ('Index', 'Subcorpus', 'File', 'Speaker', 'Colour',
'Scheme', 'Random', 'L5', 'L4', 'L3', 'L2', 'L1',
'M1', 'M2', 'M-2', 'M-1', 'R1', 'R2', 'R3', 'R4', 'R5')
sortval = StringVar()
sortval.set('Sort')
prev_sortval = ['None']
srtkind = OptionMenu(conc_left_buts, sortval, *sort_vals)
srtkind.config(width=10)
srtkind.grid(row=0, column=3)
sortval.trace("w", conc_sort)
# export to csv
Button(conc_left_buts, text='Export', command=lambda: conc_export()).grid(row=0, column=6)
# annotate
Button(conc_left_buts, text='Annotate', command=annotate_corpus).grid(row=0, column=7)
store_label = Label(conc_right_button_frame, text='Stored concordances', font=("Helvetica", 13, "bold"))
prev_conc = Frame(conc_right_button_frame)
prev_conc.grid(row=0, column=7, rowspan=3, columnspan=2,
sticky=E, padx=(10,0), pady=(4,0))
prevcbar = Scrollbar(prev_conc)
prevcbar.pack(side=RIGHT, fill=Y)
prev_conc_lb_size = 20
prev_conc_listbox = Listbox(prev_conc, selectmode=EXTENDED, width=prev_conc_lb_size,
height=4, relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=prevcbar.set, exportselection=False)
prev_conc_listbox.pack()
prevcbar.config(command=prev_conc_listbox.yview)
root.update()
# this laziness is dynamic calculation of how far apart the left and right
# button sets should be in the conc pane. i don't want to go reframing
# everything, so instead, we figure out the best distance by math
# width of window - width of left buttons - width of prev conc and 'stored concordances' label (approx)
padd = note_width - showbuts.winfo_width() - (prev_conc.winfo_width() * 2)
# for now, just a guess!
if padd < 0:
padd = 250
store_label.grid(row=0, column=6, sticky=E, padx=(padd,0))
############## ############## ############## ############## ##############
# MANAGE TAB # # MANAGE TAB # # MANAGE TAB # # MANAGE TAB # # MANAGE TAB #
############## ############## ############## ############## ##############
def make_new_project():
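# ask for a name and location, scaffold the project with corpkit's
# new_project(), then point every project path variable at the new folders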
import os
from corpkit.other import new_project
reset_everything()
name = simpledialog.askstring('New project', 'Choose a name for your project:')
if not name:
return
home = os.path.expanduser("~")
docpath = os.path.join(home, 'Documents')
if sys.platform == 'darwin':
the_kwargs = {'message': 'Choose a directory in which to create your new project'}
else:
the_kwargs = {}
fp = filedialog.askdirectory(title = 'New project location',
initialdir = docpath,
**the_kwargs)
if not fp:
return
new_proj_basepath.set('New project: "%s"' % name)
new_project(name = name, loc = fp, root=root)
project_fullpath.set(os.path.join(fp, name))
os.chdir(project_fullpath.get())
image_fullpath.set(os.path.join(project_fullpath.get(), 'images'))
savedinterro_fullpath.set(os.path.join(project_fullpath.get(), 'saved_interrogations'))
conc_fullpath.set(os.path.join(project_fullpath.get(), 'saved_concordances'))
corpora_fullpath.set(os.path.join(project_fullpath.get(), 'data'))
exported_fullpath.set(os.path.join(project_fullpath.get(), 'exported'))
log_fullpath.set(os.path.join(project_fullpath.get(), 'logs'))
addbut.config(state=NORMAL)
open_proj_basepath.set('Loaded: "%s"' % name)
save_config()
root.title("corpkit: %s" % os.path.basename(project_fullpath.get()))
#load_project(path = os.path.join(fp, name))
timestring('Project "%s" created.' % name)
note.focus_on(tab0)
update_available_corpora()
def get_saved_results(kind='interrogation', add_to=False):
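# load pickled interrogations/concordances (or list saved .png images)
# from the project folders into the in-memory dictionaries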
from corpkit.other import load_all_results
if kind == 'interrogation':
datad = savedinterro_fullpath.get()
elif kind == 'concordance':
datad = conc_fullpath.get()
elif kind == 'image':
datad = image_fullpath.get()
if datad == '':
timestring('No project loaded.')
return
if kind == 'image':
image_list = sorted([f for f in os.listdir(image_fullpath.get()) if f.endswith('.png')])
for iname in image_list:
if iname.replace('.png', '') not in all_images:
all_images.append(iname.replace('.png', ''))
if len(image_list) > 0:
nbut.config(state=NORMAL)
else:
if kind == 'interrogation':
r = load_all_results(data_dir=datad, root=root, note=note)
else:
r = load_all_results(data_dir=datad, root=root, note=note)
if r is not None:
for name, loaded in list(r.items()):
if kind == 'interrogation':
if isinstance(loaded, dict):
for subname, subloaded in list(loaded.items()):
all_interrogations[name + '-' + subname] = subloaded
else:
all_interrogations[name] = loaded
else:
all_conc[name] = loaded
if len(list(all_interrogations.keys())) > 0:
nex.configure(state=NORMAL)
refresh()
def recentchange(*args):
"""if user clicks a recent project, open it"""
if recent_project.get() != '':
project_fullpath.set(recent_project.get())
load_project(path=project_fullpath.get())
def projchange(*args):
"""if user changes projects, add to recent list and save prefs"""
if project_fullpath.get() != '' and 'Contents/MacOS' not in project_fullpath.get():
in_a_project.set(1)
if project_fullpath.get() not in most_recent_projects:
most_recent_projects.append(project_fullpath.get())
save_tool_prefs(printout=False)
#update_available_corpora()
else:
in_a_project.set(0)
# corpus path setter
savedinterro_fullpath = StringVar()
savedinterro_fullpath.set('')
data_basepath = StringVar()
data_basepath.set('Select data directory')
in_a_project = IntVar()
in_a_project.set(0)
project_fullpath = StringVar()
project_fullpath.set(rd)
project_fullpath.trace("w", projchange)
recent_project = StringVar()
recent_project.set('')
recent_project.trace("w", recentchange)
conc_fullpath = StringVar()
conc_fullpath.set('')
exported_fullpath = StringVar()
exported_fullpath.set('')
log_fullpath = StringVar()
import os
home = os.path.expanduser("~")
try:
os.makedirs(os.path.join(home, 'corpkit-logs'))
except:
pass
log_fullpath.set(os.path.join(home, 'corpkit-logs'))
image_fullpath = StringVar()
image_fullpath.set('')
image_basepath = StringVar()
image_basepath.set('Select image directory')
corpora_fullpath = StringVar()
corpora_fullpath.set('')
def imagedir_modified(*args):
import matplotlib
matplotlib.rcParams['savefig.directory'] = image_fullpath.get()
image_fullpath.trace("w", imagedir_modified)
def data_getdir():
import os
fp = filedialog.askdirectory(title = 'Open data directory')
if not fp:
return
savedinterro_fullpath.set(fp)
data_basepath.set('Saved data: "%s"' % os.path.basename(fp))
#sel_corpus_button.set('Selected corpus: "%s"' % os.path.basename(newc))
#fs = sorted([d for d in os.listdir(fp) if os.path.isfile(os.path.join(fp, d))])
timestring('Set data directory: %s' % os.path.basename(fp))
def image_getdir(nodialog = False):
import os
fp = filedialog.askdirectory()
if not fp:
return
image_fullpath.set(fp)
image_basepath.set('Images: "%s"' % os.path.basename(fp))
timestring('Set image directory: %s' % os.path.basename(fp))
def save_one_or_more(kind = 'interrogation'):
sel_vals = manage_listbox_vals
if len(sel_vals) == 0:
timestring('Nothing selected to save.')
return
from corpkit.other import save
import os
saved = 0
existing = 0
# for each filename selected
for i in sel_vals:
safename = urlify(i) + '.p'
# make sure not already there
if safename not in os.listdir(savedinterro_fullpath.get()):
if kind == 'interrogation':
savedata = all_interrogations[i]
savedata.query.pop('root', None)
savedata.query.pop('note', None)
save(savedata, safename, savedir = savedinterro_fullpath.get())
else:
savedata = all_conc[i]
try:
savedata.query.pop('root', None)
savedata.query.pop('note', None)
except:
pass
save(savedata, safename, savedir = conc_fullpath.get())
saved += 1
else:
existing += 1
timestring('%s already exists in %s.' % (urlify(i), os.path.basename(savedinterro_fullpath.get())))
if saved == 1 and existing == 0:
timestring('%s saved.' % sel_vals[0])
else:
if existing == 0:
timestring('%d %ss saved.' % (len(sel_vals), kind))
else:
timestring('%d %ss saved, %d already existed' % (saved, kind, existing))
refresh()
manage_callback()
def remove_one_or_more(window=False, kind ='interrogation'):
sel_vals = manage_listbox_vals
if window is not False:
toget = prev_conc_listbox.curselection()
sel_vals = [prev_conc_listbox.get(i) for i in toget]
if len(sel_vals) == 0:
timestring('Nothing selected.')
return
for i in sel_vals:
try:
if kind == 'interrogation':
del all_interrogations[i]
else:
del all_conc[i]
except:
pass
if len(sel_vals) == 1:
timestring('%s removed.' % sel_vals[0])
else:
timestring('%d %ss removed.' % (len(sel_vals), kind))
if kind == 'image':
refresh_images()
refresh()
manage_callback()
def del_one_or_more(kind = 'interrogation'):
sel_vals = manage_listbox_vals
ext = '.p'
if kind == 'interrogation':
p = savedinterro_fullpath.get()
elif kind == 'image':
p = image_fullpath.get()
ext = '.png'
else:
p = conc_fullpath.get()
if len(sel_vals) == 0:
timestring('Nothing selected.')
return
import os
result = messagebox.askquestion("Are You Sure?", "Permanently delete the following files:\n\n %s" % '\n '.join(sel_vals), icon='warning')
if result == 'yes':
for i in sel_vals:
if kind == 'interrogation':
del all_interrogations[i]
os.remove(os.path.join(p, i + ext))
elif kind == 'concordance':
del all_conc[i]
os.remove(os.path.join(p, i + ext))
else:
all_images.remove(i)
os.remove(os.path.join(p, i + ext))
if len(sel_vals) == 1:
timestring('%s deleted.' % sel_vals[0])
else:
timestring('%d %ss deleted.' % (len(sel_vals), kind))
refresh()
manage_callback()
def urlify(s):
"Turn title into filename"
import re
#s = s.lower()
s = re.sub(r"[^\w\s-]", '', s)
s = re.sub(r"\s+", '-', s)
s = re.sub(r"-(textbf|emph|textsc|textit)", '-', s)
return s
def rename_one_or_more(kind = 'interrogation'):
ext = '.p'
sel_vals = manage_listbox_vals
if kind == 'interrogation':
p = savedinterro_fullpath.get()
elif kind == 'image':
p = image_fullpath.get()
ext = '.png'
else:
p = conc_fullpath.get()
if len(sel_vals) == 0:
timestring('No items selected.')
return
import os
permanently = True
if permanently:
perm_text='permanently '
else:
perm_text=''
for i in sel_vals:
answer = simpledialog.askstring('Rename', 'Choose a new name for "%s":' % i, initialvalue = i)
if answer is None or answer == '':
return
else:
if kind == 'interrogation':
all_interrogations[answer] = all_interrogations.pop(i)
elif kind == 'image':
ind = all_images.index(i)
all_images.remove(i)
all_images.insert(ind, answer)
else:
all_conc[answer] = all_conc.pop(i)
if permanently:
oldf = os.path.join(p, i + ext)
if os.path.isfile(oldf):
newf = os.path.join(p, urlify(answer) + ext)
os.rename(oldf, newf)
if kind == 'interrogation':
if name_of_interro_spreadsheet.get() == i:
name_of_interro_spreadsheet.set(answer)
i_resultname.set('Interrogation results: %s' % str(answer))
#update_spreadsheet(interro_results, all_interrogations[answer].results)
if name_of_o_ed_spread.get() == i:
name_of_o_ed_spread.set(answer)
#update_spreadsheet(o_editor_results, all_interrogations[answer].results)
if name_of_n_ed_spread.get() == i:
name_of_n_ed_spread.set(answer)
#update_spreadsheet(n_editor_results, all_interrogations[answer].results)
if kind == 'image':
refresh_images()
if len(sel_vals) == 1:
timestring('%s %srenamed as %s.' % (sel_vals[0], perm_text, answer))
else:
timestring('%d items %srenamed.' % (len(sel_vals), perm_text))
refresh()
manage_callback()
def export_interrogation(kind = 'interrogation'):
"""save dataframes and options to file"""
sel_vals = manage_listbox_vals
import os
import pandas
fp = False
for i in sel_vals:
answer = simpledialog.askstring('Export data', 'Choose a save name for "%s":' % i, initialvalue = i)
if answer is None or answer == '':
return
if kind != 'interrogation':
conc_export(data = i)
else:
data = all_interrogations[i]
keys = list(data.__dict__.keys())
if in_a_project.get() == 0:
if sys.platform == 'darwin':
the_kwargs = {'message': 'Choose save directory for exported interrogation'}
else:
the_kwargs = {}
fp = filedialog.askdirectory(title = 'Choose save directory', **the_kwargs)
if fp == '':
return
else:
fp = project_fullpath.get()
os.makedirs(os.path.join(exported_fullpath.get(), answer))
for k in keys:
if k == 'results':
if data.results is not None:
tkdrop = data.results.drop('tkintertable-order', errors = 'ignore')
tkdrop.to_csv(os.path.join(exported_fullpath.get(), answer, 'results.csv'), sep ='\t', encoding = 'utf-8')
if k == 'totals':
if data.totals is not None:
tkdrop = data.totals.drop('tkintertable-order', errors = 'ignore')
tkdrop.to_csv(os.path.join(exported_fullpath.get(), answer, 'totals.csv'), sep ='\t', encoding = 'utf-8')
if k == 'query':
if getattr(data, 'query', None):
pandas.DataFrame(list(data.query.values()), index = list(data.query.keys())).to_csv(os.path.join(exported_fullpath.get(), answer, 'query.csv'), sep ='\t', encoding = 'utf-8')
#if k == 'table':
# if 'table' in list(data.__dict__.keys()) and data.table:
# pandas.DataFrame(list(data.query.values()), index = list(data.query.keys())).to_csv(os.path.join(exported_fullpath.get(), answer, 'table.csv'), sep ='\t', encoding = 'utf-8')
if fp:
timestring('Results exported to %s' % (os.path.join(os.path.basename(exported_fullpath.get()), answer)))
def reset_everything():
# result names
i_resultname.set('Interrogation results:')
resultname.set('Results to edit:')
editoname.set('Edited results:')
savedplot.set('View saved images: ')
open_proj_basepath.set('Open project')
corpus_fullpath.set('')
current_corpus.set('')
corpora_fullpath.set('')
project_fullpath.set(rd)
#special_queries.set('Off')
# spreadsheets
update_spreadsheet(interro_results, df_to_show=None, height=340)
update_spreadsheet(interro_totals, df_to_show=None, height=10)
update_spreadsheet(o_editor_results, df_to_show=None, height=140)
update_spreadsheet(o_editor_totals, df_to_show=None, height=10)
update_spreadsheet(n_editor_results, df_to_show=None, height=140)
update_spreadsheet(n_editor_totals, df_to_show=None, height=10)
# interrogations
for e in list(all_interrogations.keys()):
del all_interrogations[e]
# another way:
all_interrogations.clear()
# subcorpora listbox
subc_listbox.delete(0, END)
subc_listbox_build.delete(0, END)
# concordance
conclistbox.delete(0, END)
# every interrogation
#every_interro_listbox.delete(0, END)
# every conc
#ev_conc_listbox.delete(0, END)
prev_conc_listbox.delete(0, END)
# images
#every_image_listbox.delete(0, END)
every_interrogation['menu'].delete(0, 'end')
#pick_subcorpora['menu'].delete(0, 'end')
# speaker listboxes
speaker_listbox.delete(0, 'end')
#speaker_listbox_conc.delete(0, 'end')
# keys
for e in list(all_conc.keys()):
del all_conc[e]
for e in all_images:
all_images.remove(e)
#update_available_corpora(delete = True)
refresh()
def convert_speakdict_to_string(dictionary):
"""turn speaker info dict into a string for configparser"""
if not dictionary:
return 'none'
out = []
for k, v in list(dictionary.items()):
out.append('%s:%s' % (k, ','.join([i.replace(',', '').replace(':', '').replace(';', '') for i in v])))
if not out:
return 'none'
else:
return ';'.join(out)
def parse_speakdict(string):
"""turn configparser's speaker info back into a dict"""
if string == 'none' or not string:
return {}
redict = {}
corps = string.split(';')
for c in corps:
try:
name, vals = c.split(':')
except ValueError:
continue
vs = vals.split(',')
redict[name] = vs
return redict
def load_custom_list_json():
import json
f = os.path.join(project_fullpath.get(), 'custom_wordlists.txt')
if os.path.isfile(f):
data = json.loads(open(f).read())
for k, v in data.items():
if k not in list(custom_special_dict.keys()):
custom_special_dict[k] = v
if k not in list(saved_special_dict.keys()):
saved_special_dict[k] = v
def dump_custom_list_json():
import json
f = os.path.join(project_fullpath.get(), 'custom_wordlists.txt')
with open(f, 'w') as fo:
fo.write(json.dumps(saved_special_dict))
def load_config():
"""use configparser to get project settings"""
import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
Config = configparser.ConfigParser()
f = os.path.join(project_fullpath.get(), 'settings.ini')
Config.read(f)
# NB: missing keys in settings.ini will raise errors in the lookups below
plot_style.set(conmap(Config, "Visualise")['plot style'])
texuse.set(conmap(Config, "Visualise")['use tex'])
x_axis_l.set(conmap(Config, "Visualise")['x axis title'])
chart_cols.set(conmap(Config, "Visualise")['colour scheme'])
rel_corpuspath = conmap(Config, "Interrogate")['corpus path']
try:
files_as_subcorpora.set(conmap(Config, "Interrogate")['treat files as subcorpora'])
except KeyError:
files_as_subcorpora.set(False)
if rel_corpuspath:
current_corpus.set(rel_corpuspath)
#corpus_fullpath.set(corpa)
spk = conmap(Config, "Interrogate")['speakers']
corpora_speakers = parse_speakdict(spk)
for i, v in list(corpora_speakers.items()):
corpus_names_and_speakers[i] = v
fsize.set(conmap(Config, "Concordance")['font size'])
# window setting causes conc_sort to run, causing problems.
#win.set(conmap(Config, "Concordance")['window'])
#kind_of_dep.set(conmap(Config, 'Interrogate')['dependency type'])
#conc_kind_of_dep.set(conmap(Config, "Concordance")['dependency type'])
cods = conmap(Config, "Concordance")['coding scheme']
if cods is None:
for _, val in list(entryboxes.items()):
val.set('')
else:
codsep = cods.split(',')
for (box, val), cod in zip(list(entryboxes.items()), codsep):
val.set(cod)
if corpus_fullpath.get():
subdrs = [d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))]
else:
subdrs = []
if len(subdrs) == 0:
charttype.set('bar')
refresh()
def load_project(path=False):
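# choose/validate a project folder, point the path variables at its
# subfolders, read settings.ini, load saved results, and select the
# first parsed corpus if none is set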
import os
if path is False:
if sys.platform == 'darwin':
the_kwargs = {'message': 'Choose project directory'}
else:
the_kwargs = {}
fp = filedialog.askdirectory(title='Open project',
**the_kwargs)
else:
fp = os.path.abspath(path)
if not fp or fp == '':
return
reset_everything()
image_fullpath.set(os.path.join(fp, 'images'))
savedinterro_fullpath.set(os.path.join(fp, 'saved_interrogations'))
conc_fullpath.set(os.path.join(fp, 'saved_concordances'))
exported_fullpath.set(os.path.join(fp, 'exported'))
corpora_fullpath.set(os.path.join(fp, 'data'))
log_fullpath.set(os.path.join(fp, 'logs'))
if not os.path.isdir(savedinterro_fullpath.get()):
timestring('Selected folder does not contain corpkit project.')
return
project_fullpath.set(fp)
f = os.path.join(project_fullpath.get(), 'settings.ini')
if os.path.isfile(f):
load_config()
os.chdir(fp)
list_of_corpora = update_available_corpora()
addbut.config(state=NORMAL)
get_saved_results(kind='interrogation')
get_saved_results(kind='concordance')
get_saved_results(kind='image')
open_proj_basepath.set('Loaded: "%s"' % os.path.basename(fp))
# reset tool:
root.title("corpkit: %s" % os.path.basename(fp))
# check for parsed corpora
if not current_corpus.get():
parsed_corp = [d for d in list_of_corpora if d.endswith('-parsed')]
# select
first = False
if len(parsed_corp) > 0:
first = parsed_corp[0]
if first:
corpus_fullpath.set(os.path.abspath(first))
name = make_corpus_name_from_abs(project_fullpath.get(), first)
current_corpus.set(name)
else:
corpus_fullpath.set('')
# no corpora, so go to build...
note.focus_on(tab0)
if corpus_fullpath.get() != '':
try:
subdrs = sorted([d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))])
except FileNotFoundError:
subdrs = []
else:
subdrs = []
#lab.set('Concordancing: %s' % corpus_name)
#pick_subcorpora['menu'].delete(0, 'end')
#if len(subdrs) > 0:
# pick_subcorpora['menu'].add_command(label='all', command=_setit(subc_pick, 'all'))
# pick_subcorpora.config(state=NORMAL)
# for choice in subdrs:
# pick_subcorpora['menu'].add_command(label=choice, command=_setit(subc_pick, choice))
#else:
# pick_subcorpora.config(state=NORMAL)
# pick_subcorpora['menu'].add_command(label='None', command=_setit(subc_pick, 'None'))
# pick_subcorpora.config(state=DISABLED)
timestring('Project "%s" opened.' % os.path.basename(fp))
note.progvar.set(0)
#if corpus_name in list(corpus_names_and_speakers.keys()):
refresh_by_metadata()
#speakcheck.config(state=NORMAL)
#else:
# pass
#speakcheck.config(state=DISABLED)
load_custom_list_json()
def view_query(kind=False):
if len(manage_listbox_vals) == 0:
return
if len(manage_listbox_vals) > 1:
timestring('Can only view one interrogation at a time.')
return
global frame_to_the_right
frame_to_the_right = Frame(manage_pop)
frame_to_the_right.grid(column=2, row=0, rowspan = 6)
Label(frame_to_the_right, text='Query information', font=("Helvetica", 13, "bold")).grid(sticky=W, row=0, column=0, padx=(10,0))
mlb = Table(frame_to_the_right, ['Option', 'Value'],
column_weights=[1, 1], height=70, width=30)
mlb.grid(sticky=N, column=0, row=1)
for i in mlb._mlb.listboxes:
i.config(height=29)
mlb.columnconfig('Option', background='#afa')
mlb.columnconfig('Value', background='#efe')
q_dict = dict(all_interrogations[manage_listbox_vals[0]].query)
mlb.clear()
#show_query_vals.delete(0, 'end')
flipped_trans = {v: k for k, v in list(transdict.items())}
for d in ['dataframe1', 'dataframe2']:
q_dict.pop(d, None)
for k, v in sorted(q_dict.items()):
try:
if isinstance(v, (int, float)) and v == 0:
v = '0'
if v is None:
v = 'None'
if not v:
v = 'False'
if v is True:
v = 'True'
# could be bad with threshold etc
if v == 1:
v = 'True'
except:
pass
mlb.append([k, v])
if q_dict.get('query'):
qubox = Text(frame_to_the_right, font=("Courier New", 14), relief=SUNKEN,
wrap=WORD, width=40, height=5, undo=True)
qubox.grid(column=0, row=2, rowspan = 1, padx=(10,0))
qubox.delete(1.0, END)
qubox.insert(END, q_dict['query'])
manage_box['qubox'] = qubox
bind_textfuncts_to_widgets([qubox])
else:
try:
manage_box['qubox'].destroy()
except:
pass
manage_listbox_vals = []
def onselect_manage(evt):
# remove old vals
for i in manage_listbox_vals:
manage_listbox_vals.pop()
wx = evt.widget
indices = wx.curselection()
for index in indices:
value = wx.get(index)
if value not in manage_listbox_vals:
manage_listbox_vals.append(value)
new_proj_basepath = StringVar()
new_proj_basepath.set('New project')
open_proj_basepath = StringVar()
open_proj_basepath.set('Open project')
the_current_kind = StringVar()
def manage_popup():
from tkinter import Toplevel
global manage_pop
manage_pop = Toplevel()
manage_pop.geometry('+400+40')
manage_pop.title("Manage data: %s" % os.path.basename(project_fullpath.get()))
manage_pop.wm_attributes('-topmost', 1)
manage_what = StringVar()
manage_what.set('Manage: ')
#Label(manage_pop, textvariable=manage_what).grid(row=0, column=0, sticky='W', padx=(5, 0))
manag_frame = Frame(manage_pop, height=30)
manag_frame.grid(column=0, row=1, rowspan = 1, columnspan=2, sticky='NW', padx=10)
manage_scroll = Scrollbar(manag_frame)
manage_scroll.pack(side=RIGHT, fill=Y)
manage_listbox = Listbox(manag_frame, selectmode = SINGLE, height=30, width=30, relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=manage_scroll.set, exportselection=False)
manage_listbox.pack(fill=BOTH)
manage_listbox.select_set(0)
manage_scroll.config(command=manage_listbox.yview)
xx = manage_listbox.bind('<<ListboxSelect>>', onselect_manage)
# default: w option
manage_listbox.select_set(0)
the_current_kind.set('interrogation')
#gtsv = StringVar()
#gtsv.set('Get saved')
#getbut = Button(manage_pop, textvariable=gtsv, command=lambda: get_saved_results(), width=22)
#getbut.grid(row=2, column=0, columnspan=2)
manage_type = StringVar()
manage_type.set('Interrogations')
#Label(manage_pop, text='Save selected: ').grid(sticky=E, row=6, column=1)
savebut = Button(manage_pop, text='Save', command=lambda: save_one_or_more(kind = the_current_kind.get()))
savebut.grid(padx=15, sticky=W, column=0, row=3)
viewbut = Button(manage_pop, text='View', command=lambda: view_query(kind = the_current_kind.get()))
viewbut.grid(padx=15, sticky=W, column=0, row=4)
renamebut = Button(manage_pop, text='Rename', command=lambda: rename_one_or_more(kind = the_current_kind.get()))
renamebut.grid(padx=15, sticky=W, column=0, row=5)
#Checkbutton(manage_pop, text="Permanently", variable=perm, onvalue=True, offvalue=False).grid(column=1, row=16, padx=15, sticky=W)
exportbut = Button(manage_pop, text='Export', command=lambda: export_interrogation(kind = the_current_kind.get()))
exportbut.grid(padx=15, sticky=E, column=1, row=3)
#Label(manage_pop, text='Remove selected: '()).grid(padx=15, sticky=W, row=4, column=0)
removebut = Button(manage_pop, text='Remove', command= lambda: remove_one_or_more(kind = the_current_kind.get()))
removebut.grid(padx=15, sticky=E, column=1, row=4)
#Label(manage_pop, text='Delete selected: '()).grid(padx=15, sticky=E, row=5, column=1)
deletebut = Button(manage_pop, text='Delete', command=lambda: del_one_or_more(kind = the_current_kind.get()))
deletebut.grid(padx=15, sticky=E, column=1, row=5)
to_manage = OptionMenu(manage_pop, manage_type, *tuple(('Interrogations', 'Concordances', 'Images')))
to_manage.config(width=32, justify=CENTER)
to_manage.grid(row=0, column=0, columnspan=2)
def managed(*args):
#vals = [i.get() for i in butvar.values() if i.get() is not False and i.get() != 0 and i.get() != '0']
#vals = sorted(vals, key=lambda x:orders[x])
#the_opts = ','.join(vals)]
manage_pop.destroy()
try:
del manage_callback
except:
pass
global manage_callback
def manage_callback(*args):
"""show correct listbox, enable disable buttons below"""
import os
# set text
#manage_what.set('Manage %s' % manage_type.get().lower())
#gtsv.set('Get saved %s' % manage_type.get().lower())
# set correct action for buttons
the_current_kind.set(manage_type.get().lower().rstrip('s'))
#get_saved_results(kind = the_current_kind.get())
# enable all buttons
#getbut.config(state=NORMAL)
#try:
savebut.config(state=NORMAL)
viewbut.config(state=NORMAL)
renamebut.config(state=NORMAL)
exportbut.config(state=NORMAL)
removebut.config(state=NORMAL)
deletebut.config(state=NORMAL)
manage_listbox.delete(0, 'end')
if the_current_kind.get() == 'interrogation':
the_path = savedinterro_fullpath.get()
the_ext = '.p'
list_of_entries = list(all_interrogations.keys())
elif the_current_kind.get() == 'concordance':
the_path = conc_fullpath.get()
the_ext = '.p'
list_of_entries = list(all_conc.keys())
viewbut.config(state=DISABLED)
try:
frame_to_the_right.destroy()
except:
pass
elif the_current_kind.get() == 'image':
the_path = image_fullpath.get()
the_ext = '.png'
refresh_images()
list_of_entries = all_images
viewbut.config(state=DISABLED)
savebut.config(state=DISABLED)
exportbut.config(state=DISABLED)
removebut.config(state=DISABLED)
try:
frame_to_the_right.destroy()
except:
pass
for datum in list_of_entries:
manage_listbox.insert(END, datum)
color_saved(manage_listbox, the_path, '#ccebc5', '#fbb4ae', ext = the_ext)
manage_type.trace("w", manage_callback)
manage_type.set('Interrogations')
############## ############## ############## ############## ##############
# BUILD TAB # # BUILD TAB # # BUILD TAB # # BUILD TAB # # BUILD TAB #
############## ############## ############## ############## ##############
from corpkit.build import download_large_file, get_corpus_filepaths, \
check_jdk, parse_corpus, move_parsed_files, corenlp_exists
def create_tokenised_text():
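# show the tokeniser options popup, then call Corpus.tokenise on the
# unparsed corpus and make the tokenised version the current corpus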
from corpkit.corpus import Corpus
note.progvar.set(0)
parser_options('tokenise')
root.wait_window(poptions)
if not clicked_done.get():
return
#tokbut.config(state=DISABLED)
#tokbut = Button(tab0, textvariable=tokenise_button_text, command=ignore, width=33)
#tokbut.grid(row=6, column=0, sticky=W)
unparsed_corpus_path = corpus_fullpath.get()
#filelist, _ = get_corpus_filepaths(project_fullpath.get(), unparsed_corpus_path)
corp = Corpus(unparsed_corpus_path, print_info=False)
parsed = corp.tokenise(postag=tokenise_pos,
lemmatise=tokenise_lem,
root=root,
stdout=sys.stdout,
note=note,
nltk_data_path=nltk_data_path,
speaker_segmentation=speakseg.get(),
metadata=parse_with_metadata.get())
#corpus_fullpath.set(outdir)
outdir = parsed.path
current_corpus.set(parsed.name)
subdrs = [d for d in os.listdir(corpus_fullpath.get()) if os.path.isdir(os.path.join(corpus_fullpath.get(),d))]
if len(subdrs) == 0:
charttype.set('bar')
#basepath.set(os.path.basename(outdir))
#if len([f for f in os.listdir(outdir) if f.endswith('.p')]) > 0:
timestring('Corpus parsed and ready to interrogate: "%s"' % os.path.basename(outdir))
#else:
#timestring('Error: no files created in "%s"' % os.path.basename(outdir))
update_available_corpora()
def create_parsed_corpus():
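# show parser options, check for CoreNLP and Java JDK 1.8, then parse the
# corpus with Corpus.parse and refresh the list of available corpora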
import os
import re
import corpkit
from corpkit.corpus import Corpus
from corpkit.process import get_corenlp_path
parser_options('parse')
root.wait_window(poptions)
if not clicked_done.get():
return
unparsed_corpus_path = corpus_fullpath.get()
unparsed = Corpus(unparsed_corpus_path, print_info=False)
note.progvar.set(0)
unparsed_corpus_path = corpus_fullpath.get()
corenlppath.set(get_corenlp_path(corenlppath.get()))
if not corenlppath.get() or corenlppath.get() == 'None':
downstall_nlp = messagebox.askyesno("CoreNLP not found.",
"CoreNLP parser not found. Download/install it?")
if not downstall_nlp:
timestring('Cannot parse data without Stanford CoreNLP.')
return
jdk = check_jdk()
if jdk is False:
downstall_jdk = messagebox.askyesno("Java JDK", "You need Java JDK 1.8 to use CoreNLP.\n\nHit 'yes' to open web browser at download link. Once installed, corpkit should resume automatically")
if downstall_jdk:
import webbrowser
webbrowser.open_new('http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html')
import time
timestring('Waiting for Java JDK 1.8 installation to complete.')
while jdk is False:
jdk = check_jdk()
timestring('Waiting for Java JDK 1.8 installation to complete.')
time.sleep(5)
else:
timestring('Cannot parse data without Java JDK 1.8.')
return
parsed = unparsed.parse(speaker_segmentation=speakseg.get(),
proj_path=project_fullpath.get(),
copula_head=True,
multiprocess=False,
corenlppath=corenlppath.get(),
operations=parser_opts.get(),
root=root,
stdout=sys.stdout,
note=note,
memory_mb=parser_memory.get(),
metadata=parse_with_metadata.get())
if not parsed:
print('Error during parsing.')
sys.stdout = note.redir
current_corpus.set(parsed.name)
subdrs = [d for d in os.listdir(corpus_fullpath.get()) if \
os.path.isdir(os.path.join(corpus_fullpath.get(), d))]
if len(subdrs) == 0:
charttype.set('bar')
update_available_corpora()
timestring('Corpus parsed and ready to interrogate: "%s"' % parsed.name)
parse_button_text=StringVar()
parse_button_text.set('Create parsed corpus')
tokenise_button_text=StringVar()
tokenise_button_text.set('Create tokenised corpus')
path_to_new_unparsed_corpus = StringVar()
path_to_new_unparsed_corpus.set('')
add_corpus = StringVar()
add_corpus.set('')
add_corpus_button = StringVar()
add_corpus_button.set('Add corpus%s' % add_corpus.get())
selected_corpus_has_no_subcorpora = IntVar()
selected_corpus_has_no_subcorpora.set(0)
def add_subcorpora_to_build_box(path_to_corpus):
if not path_to_corpus:
return
import os
subc_listbox_build.configure(state=NORMAL)
subc_listbox_build.delete(0, 'end')
sub_corpora = [d for d in os.listdir(path_to_corpus) if os.path.isdir(os.path.join(path_to_corpus, d))]
if len(sub_corpora) == 0:
selected_corpus_has_no_subcorpora.set(1)
subc_listbox_build.bind('<<Modified>>', onselect_subc_build)
subc_listbox_build.insert(END, 'No subcorpora found.')
subc_listbox_build.configure(state=DISABLED)
else:
selected_corpus_has_no_subcorpora.set(0)
for e in sub_corpora:
subc_listbox_build.insert(END, e)
onselect_subc_build()
def select_corpus():
"""selects corpus for viewing/parsing
---not used anymore"""
from os.path import join as pjoin
from os.path import basename as bn
#parse_button_text.set('Parse: "%s"' % bn(unparsed_corpus_path))
tokenise_button_text.set('Tokenise: "%s"' % bn(unparsed_corpus_path))
path_to_new_unparsed_corpus.set(unparsed_corpus_path)
#add_corpus_button.set('Added: %s' % bn(unparsed_corpus_path))
where_to_put_corpus = pjoin(project_fullpath.get(), 'data')
sel_corpus.set(unparsed_corpus_path)
#sel_corpus_button.set('Selected: "%s"' % bn(unparsed_corpus_path))
parse_button_text.set('Parse: "%s"' % bn(unparsed_corpus_path))
add_subcorpora_to_build_box(unparsed_corpus_path)
timestring('Selected corpus: "%s"' % bn(unparsed_corpus_path))
def getcorpus():
"""copy unparsed texts to project folder"""
import shutil
import os
from corpkit.process import saferead
home = os.path.expanduser("~")
docpath = os.path.join(home, 'Documents')
if sys.platform == 'darwin':
the_kwargs = {'message': 'Select your corpus of unparsed text files.'}
else:
the_kwargs = {}
fp = filedialog.askdirectory(title = 'Path to unparsed corpus',
initialdir = docpath,
**the_kwargs)
where_to_put_corpus = os.path.join(project_fullpath.get(), 'data')
newc = os.path.join(where_to_put_corpus, os.path.basename(fp))
try:
shutil.copytree(fp, newc)
timestring('Corpus copied to project folder.')
except OSError:
if os.path.basename(fp) == '':
return
timestring('"%s" already exists in project.' % os.path.basename(fp))
return
from corpkit.build import folderise, can_folderise
if can_folderise(newc):
do_folderise = messagebox.askyesno("No subcorpora found",
"Your corpus contains multiple files, but no subfolders. " \
"Would you like to treat each file as a subcorpus?")
if do_folderise:
folderise(newc)
timestring('Turned files into subcorpora.')
# encode and rename files
for (rootdir, d, fs) in os.walk(newc):
for f in fs:
fpath = os.path.join(rootdir, f)
data, enc = saferead(fpath)
from corpkit.constants import OPENER, PYTHON_VERSION
# use 'fo' for the handle so the filename loop variable 'f' is not shadowed
with OPENER(fpath, "w") as fo:
if PYTHON_VERSION == 2:
fo.write(data.encode('utf-8', errors='ignore'))
else:
fo.write(data)
# rename file
#dname = '-' + os.path.basename(rootdir)
#newname = fpath.replace('.txt', dname + '.txt')
#shutil.move(fpath, newname)
path_to_new_unparsed_corpus.set(newc)
add_corpus_button.set('Added: "%s"' % os.path.basename(fp))
current_corpus.set(os.path.basename(fp))
#sel_corpus.set(newc)
#sel_corpus_button.set('Selected corpus: "%s"' % os.path.basename(newc))
timestring('Corpus copied to project folder.')
parse_button_text.set('Parse: %s' % os.path.basename(newc))
tokenise_button_text.set('Tokenise: "%s"' % os.path.basename(newc))
add_subcorpora_to_build_box(newc)
update_available_corpora()
timestring('Selected corpus for viewing/parsing: "%s"' % os.path.basename(newc))
Label(tab0, text='Project', font=("Helvetica", 13, "bold")).grid(sticky=W, row=0, column=0)
#Label(tab0, text='New project', font=("Helvetica", 13, "bold")).grid(sticky=W, row=0, column=0)
Button(tab0, textvariable=new_proj_basepath, command=make_new_project, width=24).grid(row=1, column=0, sticky=W)
#Label(tab0, text='Open project: ').grid(row=2, column=0, sticky=W)
Button(tab0, textvariable=open_proj_basepath, command=load_project, width=24).grid(row=2, column=0, sticky=W)
#Label(tab0, text='Add corpus to project: ').grid(row=4, column=0, sticky=W)
addbut = Button(tab0, textvariable=add_corpus_button, width=24, state=DISABLED)
addbut.grid(row=3, column=0, sticky=W)
addbut.config(command=lambda: runner(addbut, getcorpus))
#Label(tab0, text='Corpus to parse: ').grid(row=6, column=0, sticky=W)
#Button(tab0, textvariable=sel_corpus_button, command=select_corpus, width=24).grid(row=4, column=0, sticky=W)
#Label(tab0, text='Parse: ').grid(row=8, column=0, sticky=W)
#speakcheck_build = Checkbutton(tab0, text="Speaker segmentation", variable=speakseg, state=DISABLED)
#speakcheck_build.grid(column=0, row=5, sticky=W)
parsebut = Button(tab0, textvariable=parse_button_text, width=24, state=DISABLED)
parsebut.grid(row=5, column=0, sticky=W)
parsebut.config(command=lambda: runner(parsebut, create_parsed_corpus))
#Label(tab0, text='Parse: ').grid(row=8, column=0, sticky=W)
tokbut = Button(tab0, textvariable=tokenise_button_text, width=24, state=DISABLED)
tokbut.grid(row=6, column=0, sticky=W)
tokbut.config(command=lambda: runner(tokbut, create_tokenised_text))
def onselect_subc_build(evt = False):
"""get selected subcorpus, delete editor, show files in subcorpus"""
import os
if evt:
# should only be one
for i in subc_sel_vals_build:
subc_sel_vals_build.pop()
wx = evt.widget
indices = wx.curselection()
for index in indices:
value = wx.get(index)
if value not in subc_sel_vals_build:
subc_sel_vals_build.append(value)
# return for false click
if len(subc_sel_vals_build) == 0 and selected_corpus_has_no_subcorpora.get() == 0:
return
# destroy editor and canvas if possible
for ob in list(buildbits.values()):
try:
ob.destroy()
except:
pass
f_view.configure(state=NORMAL)
f_view.delete(0, 'end')
newp = path_to_new_unparsed_corpus.get()
if selected_corpus_has_no_subcorpora.get() == 0:
newsub = os.path.join(newp, subc_sel_vals_build[0])
else:
newsub = newp
fs = [f for f in os.listdir(newsub) if f.endswith('.txt') \
or f.endswith('.xml') \
or f.endswith('.conll') \
or f.endswith('.conllu')]
for e in fs:
f_view.insert(END, e)
if selected_corpus_has_no_subcorpora.get() == 0:
f_in_s.set('Files in subcorpus: %s' % subc_sel_vals_build[0])
else:
f_in_s.set('Files in corpus: %s' % os.path.basename(path_to_new_unparsed_corpus.get()))
# a listbox of subcorpora
Label(tab0, text='Subcorpora', font=("Helvetica", 13, "bold")).grid(row=7, column=0, sticky=W)
height = 21 if small_screen else 24
build_sub_f = Frame(tab0, width=24, height=height)
build_sub_f.grid(row=8, column=0, sticky=W, rowspan = 2, padx=(8,0))
build_sub_sb = Scrollbar(build_sub_f)
build_sub_sb.pack(side=RIGHT, fill=Y)
subc_listbox_build = Listbox(build_sub_f, selectmode = SINGLE, height=height, state=DISABLED, relief=SUNKEN, bg='#F4F4F4',
yscrollcommand=build_sub_sb.set, exportselection=False, width=24)
subc_listbox_build.pack(fill=BOTH)
xxy = subc_listbox_build.bind('<<ListboxSelect>>', onselect_subc_build)
subc_listbox_build.select_set(0)
build_sub_sb.config(command=subc_listbox_build.yview)
def show_a_tree(evt):
"""get selected file and show in file view"""
import os
from nltk import Tree
from nltk.tree import ParentedTree
from nltk.draw.util import CanvasFrame
from nltk.draw import TreeWidget
sbox = buildbits['sentsbox']
sent = sentdict[int(sbox.curselection()[0])]
t = ParentedTree.fromstring(sent)
# make a frame attached to tab0
#cf = CanvasFrame(tab0, width=200, height=200)
cf = Canvas(tab0, width=800, height=400, bd=5)
buildbits['treecanvas'] = cf
cf.grid(row=5, column=2, rowspan = 11, padx=(0,0))
if cf not in boxes:
boxes.append(cf)
# draw the tree and send to the frame's canvas
tc = TreeWidget(cf, t, draggable=1,
node_font=('helvetica', -10, 'bold'),
leaf_font=('helvetica', -10, 'italic'),
roof_fill='white', roof_color='black',
leaf_color='green4', node_color='blue2')
tc.bind_click_trees(tc.toggle_collapsed)
def select_all_editor(*args):
"""not currently using, but might be good for select all"""
editor = buildbits['editor']
editor.tag_add(SEL, "1.0", END)
editor.mark_set(INSERT, "1.0")
editor.see(INSERT)
return 'break'
def onselect_f(evt):
"""get selected file and show in file view"""
for box in boxes:
try:
box.destroy()
except:
pass
import os
# should only be one
for i in chosen_f:
chosen_f.pop()
wx = evt.widget
indices = wx.curselection()
for index in indices:
value = wx.get(index)
if value not in chosen_f:
chosen_f.append(value)
if len(chosen_f) == 0:
return
if chosen_f[0].endswith('.txt'):
newp = path_to_new_unparsed_corpus.get()
if selected_corpus_has_no_subcorpora.get() == 0:
fp = os.path.join(newp, subc_sel_vals_build[0], chosen_f[0])
else:
fp = os.path.join(newp, chosen_f[0])
if not os.path.isfile(fp):
fp = os.path.join(newp, os.path.basename(corpus_fullpath.get()), chosen_f[0])
from corpkit.constants import OPENER
with OPENER(fp, 'r', encoding='utf-8') as fo:
text = fo.read()
# needs a scrollbar
editor = Text(tab0, height=32)
bind_textfuncts_to_widgets([editor])
buildbits['editor'] = editor
editor.grid(row=1, column=2, rowspan=9, pady=(10,0), padx=(20, 0))
if editor not in boxes:
boxes.append(editor)
all_text_widgets.append(editor)
editor.bind("<%s-s>" % key, savebuttonaction)
editor.bind("<%s-S>" % key, savebuttonaction)
editor.config(borderwidth=0,
font="{Lucida Sans Typewriter} 12",
#foreground="green",
#background="black",
#insertbackground="white", # cursor
#selectforeground="green", # selection
#selectbackground="#008000",
wrap=WORD, # use word wrapping
width=64,
undo=True, # Tk 8.4
)
editor.delete(1.0, END)
editor.insert(END, text)
editor.mark_set(INSERT, 1.0)
editf.set('Edit file: %s' % chosen_f[0])
viewedit = Label(tab0, textvariable=editf, font=("Helvetica", 13, "bold"))
viewedit.grid(row=0, column=2, sticky=W, padx=(20, 0))
if viewedit not in boxes:
boxes.append(viewedit)
filename.set(chosen_f[0])
fullpath_to_file.set(fp)
but = Button(tab0, text='Save changes', command=savebuttonaction)
but.grid(row=9, column=2, sticky='SE')
buildbits['but'] = but
if but not in boxes:
boxes.append(but)
elif chosen_f[0].endswith('.conll') or chosen_f[0].endswith('.conllu'):
import re
parsematch = re.compile(r'^# parse=(.*)')
newp = path_to_new_unparsed_corpus.get()
if selected_corpus_has_no_subcorpora.get() == 0:
fp = os.path.join(newp, subc_sel_vals_build[0], chosen_f[0])
else:
fp = os.path.join(newp, chosen_f[0])
if not os.path.isfile(fp):
fp = os.path.join(newp, os.path.basename(corpus_fullpath.get()), chosen_f[0])
from corpkit.constants import OPENER
with OPENER(fp, 'r', encoding='utf-8') as fo:
text = fo.read()
lines = text.splitlines()
editf.set('View trees: %s' % chosen_f[0])
vieweditxml = Label(tab0, textvariable=editf, font=("Helvetica", 13, "bold"))
vieweditxml.grid(row=0, column=2, sticky=W, padx=(20,0))
buildbits['vieweditxml'] = vieweditxml
if vieweditxml not in boxes:
boxes.append(vieweditxml)
trees = []
def flatten_treestring(tree):
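# e.g. '(ROOT (NP (DT The) (NN dog)))' -> 'The dog'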
replaces = {'$ ': '$',
'`` ': '``',
' ,': ',',
' .': '.',
"'' ": "''",
" n't": "n't",
" 're": "'re",
" 'm": "'m",
" 's": "'s",
" 'd": "'d",
" 'll": "'ll",
' ': ' '}
import re
tree = re.sub(r'\(.*? ', '', tree).replace(')', '')
for k, v in replaces.items():
tree = tree.replace(k, v)
return tree
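            # A worked example of what the helper above does (illustrative
            # comment only, not original code): given the bracketed parse
            # '(ROOT (S (NP (PRP I)) (VP (VBP do) (RB n't))))', the regex strips
            # each '(LABEL ' prefix, the replace() drops the closing brackets,
            # and the replacements table undoes PTB-style tokenisation, giving
            # the flat string "I don't".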
for l in lines:
searched = re.search(parsematch, l)
if searched:
bracktree = searched.group(1)
flat = flatten_treestring(bracktree)
trees.append([bracktree, flat])
sentsbox = Listbox(tab0, selectmode=SINGLE, width=120, font=("Courier New", 11))
if sentsbox not in boxes:
boxes.append(sentsbox)
buildbits['sentsbox'] = sentsbox
sentsbox.grid(row=1, column=2, rowspan=4, padx=(20,0))
sentsbox.delete(0, END)
for i in list(sentdict.keys()):
del sentdict[i]
for i, (t, f) in enumerate(trees):
                cutshort = f[:80] + '...' if len(f) > 80 else f
                sentsbox.insert(END, '%d: %s' % (i + 1, cutshort))
sentdict[i] = t
xxyyz = sentsbox.bind('<<ListboxSelect>>', show_a_tree)
f_in_s = StringVar()
f_in_s.set('Files in subcorpus ')
# a listbox of files
Label(tab0, textvariable=f_in_s, font=("Helvetica", 13, "bold")).grid(row=0, column=1, sticky='NW', padx=(30, 0))
height = 31 if small_screen else 36
build_f_box = Frame(tab0, height=height)
build_f_box.grid(row=1, column=1, rowspan = 9, padx=(20, 0), pady=(10, 0))
build_f_sb = Scrollbar(build_f_box)
build_f_sb.pack(side=RIGHT, fill=Y)
f_view = Listbox(build_f_box, selectmode = EXTENDED, height=height, state=DISABLED, relief=SUNKEN, bg='#F4F4F4',
exportselection=False, yscrollcommand=build_f_sb.set)
f_view.pack(fill=BOTH)
xxyy = f_view.bind('<<ListboxSelect>>', onselect_f)
f_view.select_set(0)
build_f_sb.config(command=f_view.yview)
editf = StringVar()
editf.set('Edit file: ')
def savebuttonaction(*args):
from corpkit.constants import OPENER, PYTHON_VERSION
editor = buildbits['editor']
text = editor.get(1.0, END)
with OPENER(fullpath_to_file.get(), "w") as fo:
if PYTHON_VERSION == 2:
fo.write(text.rstrip().encode("utf-8"))
fo.write("\n")
else:
fo.write(text.rstrip() + '\n')
timestring('%s saved.' % filename.get())
filename = StringVar()
filename.set('')
fullpath_to_file = StringVar()
fullpath_to_file.set('')
############ ############ ############ ############ ############
# MENU BAR # # MENU BAR # # MENU BAR # # MENU BAR # # MENU BAR #
############ ############ ############ ############ ############
realquit = IntVar()
realquit.set(0)
def clear_all():
import os
import sys
python = sys.executable
os.execl(python, python, * sys.argv)
def get_tool_pref_file():
"""get the location of the tool preferences files"""
return os.path.join(rd, 'tool_settings.ini')
def save_tool_prefs(printout=True):
"""save any preferences to tool preferences"""
try:
import configparser
except:
import ConfigParser as configparser
import os
Config = configparser.ConfigParser()
settingsfile = get_tool_pref_file()
if settingsfile is None:
timestring('No settings file found.')
return
# parsing for ints is causing errors?
Config.add_section('Projects')
Config.set('Projects','most recent', ';'.join(most_recent_projects[-5:]).lstrip(';'))
Config.add_section('CoreNLP')
Config.set('CoreNLP','Parser path', corenlppath.get())
Config.set('CoreNLP','Memory allocation', str(parser_memory.get()))
Config.add_section('Appearance')
Config.set('Appearance','Spreadsheet row header width', str(row_label_width.get()))
Config.set('Appearance','Spreadsheet cell width', str(cell_width.get()))
Config.add_section('Other')
Config.set('Other','Truncate concordance lines', str(truncate_conc_after.get()))
Config.set('Other','Truncate spreadsheets', str(truncate_spreadsheet_after.get()))
Config.set('Other','Automatic update check', str(do_auto_update.get()))
Config.set('Other','do concordancing', str(do_concordancing.get()))
Config.set('Other','Only format middle concordance column', str(only_format_match.get()))
Config.set('Other','p value', str(p_val.get()))
        with open(settingsfile, 'w') as cfgfile:
            Config.write(cfgfile)
#cell_width.get()
#row_label_width.get()
#truncate_conc_after.get()
#truncate_spreadsheet_after.get()
#do_auto_update.get()
if printout:
timestring('Tool preferences saved.')
def load_tool_prefs():
"""load preferences"""
import os
try:
import configparser
except:
import ConfigParser as configparser
settingsfile = get_tool_pref_file()
if settingsfile is None:
timestring('No settings file found.')
return
if not os.path.isfile(settingsfile):
timestring('No settings file found at %s' % settingsfile)
return
def tryer(config, var, section, name):
"""attempt to load a value, fail gracefully if not there"""
try:
if config.has_option(section, name):
bit = conmap(config, section).get(name, False)
if name in ['memory allocation', 'truncate spreadsheets',
'truncate concordance lines', 'p value']:
bit = int(bit)
else:
bit = bool(bit)
var.set(bit)
except:
pass
Config = configparser.ConfigParser()
Config.read(settingsfile)
tryer(Config, parser_memory, "CoreNLP", "memory allocation")
#tryer(Config, row_label_width, "Appearance", 'spreadsheet row header width')
#tryer(Config, cell_width, "Appearance", 'spreadsheet cell width')
tryer(Config, do_auto_update, "Other", 'automatic update check')
#tryer(Config, conc_when_int, "Other", 'concordance when interrogating')
tryer(Config, only_format_match, "Other", 'only format middle concordance column')
tryer(Config, do_concordancing, "Other", 'do concordancing')
#tryer(Config, noregex, "Other", 'disable regular expressions for plaintext search')
tryer(Config, truncate_conc_after, "Other", 'truncate concordance lines')
tryer(Config, truncate_spreadsheet_after, "Other", 'truncate spreadsheets')
tryer(Config, p_val, "Other", 'p value')
try:
parspath = conmap(Config, "CoreNLP")['parser path']
except:
parspath = 'default'
try:
mostrec = conmap(Config, "Projects")['most recent'].lstrip(';').split(';')
for i in mostrec:
most_recent_projects.append(i)
except:
pass
if parspath == 'default' or parspath == '':
corenlppath.set(os.path.join(os.path.expanduser("~"), 'corenlp'))
else:
corenlppath.set(parspath)
timestring('Tool preferences loaded.')
def save_config():
try:
import configparser
except:
import ConfigParser as configparser
import os
        if any(v.get() != '' for v in entryboxes.values()):
            codscheme = ','.join([i.get().replace(',', '') for i in entryboxes.values()])
else:
codscheme = None
Config = configparser.ConfigParser()
cfgfile = open(os.path.join(project_fullpath.get(), 'settings.ini') ,'w')
Config.add_section('Build')
Config.add_section('Interrogate')
relcorpuspath = corpus_fullpath.get().replace(project_fullpath.get(), '').lstrip('/')
Config.set('Interrogate','Corpus path', relcorpuspath)
Config.set('Interrogate','Speakers', convert_speakdict_to_string(corpus_names_and_speakers))
#Config.set('Interrogate','dependency type', kind_of_dep.get())
Config.set('Interrogate','Treat files as subcorpora', str(files_as_subcorpora.get()))
Config.add_section('Edit')
Config.add_section('Visualise')
Config.set('Visualise','Plot style', plot_style.get())
Config.set('Visualise','Use TeX', str(texuse.get()))
Config.set('Visualise','x axis title', x_axis_l.get())
Config.set('Visualise','Colour scheme', chart_cols.get())
Config.add_section('Concordance')
Config.set('Concordance','font size', str(fsize.get()))
#Config.set('Concordance','dependency type', conc_kind_of_dep.get())
Config.set('Concordance','coding scheme', codscheme)
if win.get() == 'Window':
window = 70
else:
window = int(win.get())
Config.set('Concordance','window', str(window))
Config.add_section('Manage')
Config.set('Manage','Project path',project_fullpath.get())
        Config.write(cfgfile)
        cfgfile.close()
timestring('Project settings saved to settings.ini.')
def quitfunc():
if in_a_project.get() == 1:
save_ask = messagebox.askyesno("Save settings",
"Save settings before quitting?")
if save_ask:
save_config()
save_tool_prefs()
realquit.set(1)
root.quit()
root.protocol("WM_DELETE_WINDOW", quitfunc)
def restart(newpath=False):
"""restarts corpkit .py or gui, designed for version updates"""
import sys
import os
import subprocess
import inspect
timestring('Restarting ... ')
# get path to current script
if newpath is False:
newpath = inspect.getfile(inspect.currentframe())
if sys.platform == "win32":
if newpath.endswith('.py'):
timestring('Not yet supported, sorry.')
return
os.startfile(newpath)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
if newpath.endswith('.py'):
opener = 'python'
if 'daniel/Work/corpkit' in newpath:
opener = '/Users/daniel/virtenvs/ssled/bin/python'
cmd = [opener, newpath]
else:
if sys.platform == "darwin":
cmd = [opener, '-n', newpath]
else:
cmd = [opener, newpath]
#os.system('%s %s' % (opener, newpath))
#subprocess.Popen(cmd)
from time import sleep
sleep(1)
#reload(inspect.getfile(inspect.currentframe()))
subprocess.Popen(cmd)
try:
the_splash.__exit__()
except:
pass
root.quit()
sys.exit()
def untar(fname, extractto):
"""untar a file"""
import tarfile
tar = tarfile.open(fname)
tar.extractall(extractto)
tar.close()
def update_corpkit(stver):
"""get new corpkit, delete this one, open it up"""
import sys
import os
import inspect
import corpkit
from corpkit.build import download_large_file
# get path to this script
corpath = rd
#corpath = inspect.getfile(inspect.currentframe())
# check we're using executable version, because .py users can
# use github to update
extens = '.%s' % fext
if extens not in corpath and sys.platform != 'darwin':
timestring("Get it from GitHub: https://www.github.com/interrogator/corpkit")
return
# split on .app or .exe, then re-add .app
apppath = corpath.split(extens , 1)[0] + extens
appdir = os.path.dirname(apppath)
# get new version and the abs path of the download dir and the tar file
url = 'https://raw.githubusercontent.com/interrogator/corpkit-app/master/corpkit-%s.tar.gz' % stver
path_to_app_parent = sys.argv[0]
if sys.platform == 'darwin':
if '.app' in path_to_app_parent:
path_to_app_parent = os.path.dirname(path_to_app_parent.split('.app', 1)[0])
else:
# WINDOWS SUPPORT
pass
if '.py' in path_to_app_parent:
py_script = True
path_to_app_parent = os.path.dirname(os.path.join(path_to_app_parent.split('.py', 1)[0]))
downloaded_dir, corpkittarfile = download_large_file(path_to_app_parent, \
url, root=root, note=note, actually_download = True)
timestring('Extracting update ...')
# why not extract to actual dir?
untar(corpkittarfile, downloaded_dir)
timestring('Applying update ...')
# delete the tar
#os.remove(corpkittarfile)
# get whatever the new app is called
newappfname = [f for f in os.listdir(downloaded_dir) if f.endswith(fext)][0]
absnewapp = os.path.join(downloaded_dir, newappfname)
# get the executable in the path
restart_now = messagebox.askyesno("Update and restart",
"Restart now?\n\nThis will delete the current version of corpkit.")
import shutil
if restart_now:
# remove this very app, but not script, just in case
if '.py' not in apppath:
if sys.platform == 'darwin':
shutil.rmtree(apppath)
# if windows, it's not a dir
else:
os.remove(apppath)
# move new version
if sys.platform == 'darwin':
shutil.copytree(absnewapp, os.path.join(appdir, newappfname))
# if windows, it's not a dir
else:
shutil.copy(absnewapp, os.path.join(appdir, newappfname))
# delete donwnloaded file and dir
shutil.rmtree(downloaded_dir)
restart(os.path.join(appdir, newappfname))
            # crude way to do this; is there a standard way to download without installing?
else:
if sys.platform == 'darwin':
try:
shutil.copytree(absnewapp, os.path.join(appdir, newappfname))
except OSError:
shutil.copytree(absnewapp, os.path.join(appdir, newappfname + '-new'))
else:
try:
shutil.copy(absnewapp, os.path.join(appdir, newappfname))
except OSError:
shutil.copy(absnewapp, os.path.join(appdir, newappfname + '-new'))
timestring('New version in %s' % os.path.join(appdir, newappfname + '-new'))
return
def make_float_from_version(ver):
"""take a version string and turn it into a comparable float"""
ver = str(ver)
ndots_to_delete = ver.count('.') - 1
return float(ver[::-1].replace('.', '', ndots_to_delete)[::-1])
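    # Worked example (comment added for clarity, not original code):
    # '2.3.4' -> '2.34' -> 2.34 and '3.1' -> 3.1, so versions compare as floats.
    # Note the scheme assumes single-digit components: '2.10' becomes 2.1,
    # which would sort below '2.9'.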
def modification_date(filename):
"""get datetime of file modification"""
import os
import datetime
t = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(t)
def check_updates(showfalse=True, lateprint=False, auto=False):
"""check for updates, minor and major."""
import os
import re
import datetime
from dateutil.parser import parse
import sys
import shutil
if noupdate:
return
# weird hacky way to not repeat request
if do_auto_update.get() == 0 and auto is True:
return
if do_auto_update_this_session.get() is False and auto is True:
return
# cancel auto if manual
if auto is False:
do_auto_update_this_session.set(0)
# get version as float
try:
oldstver = open(os.path.join(rd, 'VERSION.txt'), 'r').read().strip()
except:
import corpkit
oldstver = str(corpkit.__version__)
ver = make_float_from_version(oldstver)
# check for major update
try:
response = requests.get('https://www.github.com/interrogator/corpkit-app', verify=False)
html = response.text
except:
if showfalse:
messagebox.showinfo(
"No connection to remote server",
"Could not connect to remote server.")
return
        reg = re.compile(r'title=.corpkit-([0-9\.]+)\.tar\.gz')
# get version number as string
stver = str(re.search(reg, html).group(1))
vnum = make_float_from_version(stver)
# check for major update
#if 2 == 2:
if vnum > ver:
timestring('Update found: corpkit %s' % stver)
download_update = messagebox.askyesno("Update available",
"Update available: corpkit %s\n\n Download now?" % stver)
if download_update:
update_corpkit(stver)
return
else:
timestring('Update found: corpkit %s. Not downloaded.' % stver)
return
# check for minor update
else:
import sys
timereg = re.compile(r'# <updated>(.*)<.updated>')
#if '.py' in sys.argv[0] and sys.platform == 'darwin':
#oldd = open(os.path.join(rd, 'gui.py'), 'r').read()
#elif '.app' in sys.argv[0]:
oldd = open(os.path.join(rd, 'gui.py'), 'r').read()
dateline = next(l for l in oldd.split('\n') if l.startswith('# <updated>'))
dat = re.search(timereg, dateline).group(1)
try:
olddate = parse(dat)
except:
olddate = modification_date(sys.argv[0])
try:
script_response = requests.get('https://raw.githubusercontent.com/interrogator/corpkit-app/master/gui.py', verify=False)
newscript = script_response.text
dateline = next(l for l in newscript.split('\n') if l.startswith('# <updated>'))
except:
if showfalse:
messagebox.showinfo(
"No connection to remote server",
"Could not connect to remote server.")
return
# parse the date part
try:
dat = re.search(timereg, dateline).group(1)
newdate = parse(dat)
except:
if showfalse:
messagebox.showinfo(
"Error checking for update.",
"Error checking for update.")
return
# testing code
#if 2 == 2:
if newdate > olddate:
timestring('Minor update found: corpkit %s' % stver)
download_update = messagebox.askyesno("Minor update available",
"Minor update available: corpkit %s\n\n Download and apply now?" % stver)
if download_update:
url = 'https://raw.githubusercontent.com/interrogator/corpkit-app/master/corpkit-%s' % oldstver
# update script
if not sys.argv[0].endswith('gui.py'):
script_url = 'https://raw.githubusercontent.com/interrogator/corpkit-app/master/gui.py'
response = requests.get(script_url, verify=False)
with open(os.path.join(rd, 'gui.py'), "w") as fo:
fo.write(response.text)
else:
timestring("Can't replace developer copy, sorry.")
return
dir_containing_ex, execut = download_large_file(project_fullpath.get(),
url = url, root=root, note=note)
# make sure we can execute the new script
import os
os.chmod(execut, 0o777)
if not sys.argv[0].endswith('gui.py'):
os.remove(os.path.join(rd, 'corpkit-%s' % oldstver))
shutil.move(execut, os.path.join(rd, 'corpkit-%s' % oldstver))
shutil.rmtree(dir_containing_ex)
else:
timestring("Can't replace developer copy, sorry.")
return
#import inspect
#sys.argv[0]
#extens = '.%s' % fext
#if extens not in corpath and sys.platform != 'darwin':
# timestring("Get it from GitHub: https://www.github.com/interrogator/corpkit")
# return
## split on .app or .exe, then re-add .app
#apppath = corpath.split(extens , 1)[0] + extens
restart(sys.argv[0].split('.app', 1)[0] + '.app')
return
else:
timestring('Minor update found: corpkit %s, %s. Not downloaded.' % (stver, dat.replace('T', ', ')))
return
if showfalse:
messagebox.showinfo(
"Up to date!",
"corpkit (version %s) up to date!" % oldstver)
timestring('corpkit (version %s) up to date.' % oldstver)
return
def start_update_check():
if noupdate:
return
try:
check_updates(showfalse=False, lateprint=True, auto=True)
except:
filemenu.entryconfig("Check for updates", state="disabled")
def unmax():
"""stop it being always on top"""
root.attributes('-topmost', False)
root.after(1000, unmax)
    if '.py' not in sys.argv[0]:
root.after(10000, start_update_check)
def set_corenlp_path():
if sys.platform == 'darwin':
the_kwargs = {'message': 'Select folder containing the CoreNLP parser.'}
else:
the_kwargs = {}
fp = filedialog.askdirectory(title='CoreNLP path',
initialdir=os.path.expanduser("~"),
**the_kwargs)
if fp and fp != '':
corenlppath.set(fp)
if not get_fullpath_to_jars(corenlppath):
recog = messagebox.showwarning(title='CoreNLP not found',
message="CoreNLP not found in %s." % fp )
timestring("CoreNLP not found in %s." % fp )
else:
save_tool_prefs()
def config_menu(*args):
import os
fp = corpora_fullpath.get()
recentmenu.delete(0, END)
        if len(most_recent_projects) == 0 or \
                (len(most_recent_projects) == 1 and most_recent_projects[0] == ''):
            filemenu.entryconfig("Open recent project", state="disabled")
        else:
            filemenu.entryconfig("Open recent project", state="normal")
for c in list(set(most_recent_projects[::-1][:5])):
if c:
lab = os.path.join(os.path.basename(os.path.dirname(c)), os.path.basename(c))
recentmenu.add_radiobutton(label=lab, variable=recent_project, value = c)
if os.path.isdir(fp):
all_corpora = get_all_corpora()
if len(all_corpora) > 0:
filemenu.entryconfig("Select corpus", state="normal")
selectmenu.delete(0, END)
for c in all_corpora:
selectmenu.add_radiobutton(label=c, variable=current_corpus, value = c)
else:
filemenu.entryconfig("Select corpus", state="disabled")
else:
filemenu.entryconfig("Select corpus", state="disabled")
#filemenu.entryconfig("Manage project", state="disabled")
if in_a_project.get() == 0:
filemenu.entryconfig("Save project settings", state="disabled")
filemenu.entryconfig("Load project settings", state="disabled")
filemenu.entryconfig("Manage project", state="disabled")
#filemenu.entryconfig("Set CoreNLP path", state="disabled")
else:
filemenu.entryconfig("Save project settings", state="normal")
filemenu.entryconfig("Load project settings", state="normal")
filemenu.entryconfig("Manage project", state="normal")
#filemenu.entryconfig("Set CoreNLP path", state="normal")
menubar = Menu(root)
selectmenu = Menu(root)
recentmenu = Menu(root)
if sys.platform == 'darwin':
filemenu = Menu(menubar, tearoff=0, name='apple', postcommand=config_menu)
else:
filemenu = Menu(menubar, tearoff=0, postcommand=config_menu)
filemenu.add_command(label="New project", command=make_new_project)
filemenu.add_command(label="Open project", command=load_project)
filemenu.add_cascade(label="Open recent project", menu=recentmenu)
filemenu.add_cascade(label="Select corpus", menu=selectmenu)
filemenu.add_separator()
filemenu.add_command(label="Save project settings", command=save_config)
filemenu.add_command(label="Load project settings", command=load_config)
filemenu.add_separator()
filemenu.add_command(label="Save tool preferences", command=save_tool_prefs)
filemenu.add_separator()
filemenu.add_command(label="Manage project", command=manage_popup)
filemenu.add_separator()
#filemenu.add_command(label="Coding scheme print", command=print_entryboxes)
# broken on deployed version ... path to self stuff
#filemenu.add_separator()
filemenu.add_command(label="Check for updates", command=check_updates)
#filemenu.entryconfig("Check for updates", state="disabled")
#filemenu.add_separator()
#filemenu.add_command(label="Restart tool", command=restart)
filemenu.add_separator()
#filemenu.add_command(label="Exit", command=quitfunc)
menubar.add_cascade(label="File", menu=filemenu)
if sys.platform == 'darwin':
windowmenu = Menu(menubar, name='window')
menubar.add_cascade(menu=windowmenu, label='Window')
else:
sysmenu = Menu(menubar, name='system')
menubar.add_cascade(menu=sysmenu)
def schemesshow(*args):
"""only edit schemes once in project"""
import os
if project_fullpath.get() == '':
schemenu.entryconfig("Wordlists", state="disabled")
schemenu.entryconfig("Coding scheme", state="disabled")
else:
schemenu.entryconfig("Wordlists", state="normal")
schemenu.entryconfig("Coding scheme", state="normal")
schemenu = Menu(menubar, tearoff=0, postcommand=schemesshow)
menubar.add_cascade(label="Schemes", menu=schemenu)
schemenu.add_command(label="Coding scheme", command=codingschemer)
schemenu.add_command(label="Wordlists", command=custom_lists)
# prefrences section
if sys.platform == 'darwin':
root.createcommand('tk::mac::ShowPreferences', preferences_popup)
def about_box():
"""About message with current corpkit version"""
import os
try:
oldstver = str(open(os.path.join(rd, 'VERSION.txt'), 'r').read().strip())
except:
import corpkit
oldstver = str(corpkit.__version__)
messagebox.showinfo('About', 'corpkit %s\n\ninterrogator.github.io/corpkit\ngithub.com/interrogator/corpkit\npypi.python.org/pypi/corpkit\n\n' \
'Creator: Daniel McDonald\[email protected]' % oldstver)
def show_log():
"""save log text as txt file and open it"""
import os
the_input = '\n'.join([x for x in note.log_stream])
#the_input = note.text.get("1.0",END)
        c = 0
        logpath = os.path.join(log_fullpath.get(), 'log-%s.txt' % str(c).zfill(2))
        while os.path.isfile(logpath):
            c += 1
            logpath = os.path.join(log_fullpath.get(), 'log-%s.txt' % str(c).zfill(2))
with open(logpath, "w") as fo:
fo.write(the_input)
prnt = os.path.join('logs', os.path.basename(logpath))
timestring('Log saved to "%s".' % prnt)
import sys
if sys.platform == "win32":
os.startfile(logpath)
else:
opener = "open" if sys.platform == "darwin" else "xdg-open"
import subprocess
            subprocess.call([opener, logpath])
def bind_textfuncts_to_widgets(lst):
"""add basic cut copy paste to text entry widgets"""
for i in lst:
i.bind("<%s-a>" % key, select_all_text)
i.bind("<%s-A>" % key, select_all_text)
i.bind("<%s-v>" % key, paste_into_textwidget)
i.bind("<%s-V>" % key, paste_into_textwidget)
i.bind("<%s-x>" % key, cut_from_textwidget)
i.bind("<%s-X>" % key, cut_from_textwidget)
i.bind("<%s-c>" % key, copy_from_textwidget)
i.bind("<%s-C>" % key, copy_from_textwidget)
try:
i.config(undo = True)
except:
pass
# load preferences
load_tool_prefs()
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help", command=lambda: show_help('h'))
helpmenu.add_command(label="Query writing", command=lambda: show_help('q'))
helpmenu.add_command(label="Troubleshooting", command=lambda: show_help('t'))
helpmenu.add_command(label="Save log", command=show_log)
#helpmenu.add_command(label="Set CoreNLP path", command=set_corenlp_path)
helpmenu.add_separator()
helpmenu.add_command(label="About", command=about_box)
menubar.add_cascade(label="Help", menu=helpmenu)
if sys.platform == 'darwin':
import corpkit
import subprocess
ver = corpkit.__version__
corpath = os.path.dirname(corpkit.__file__)
        if not corpath.startswith('/Library/Python') and 'corpkit/corpkit/corpkit' not in corpath:
try:
subprocess.call('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "corpkit-%s" to true' ''' % ver, shell = True)
except:
pass
root.config(menu=menubar)
note.focus_on(tab1)
if loadcurrent:
load_project(loadcurrent)
root.deiconify()
root.lift()
try:
root._splash.__exit__()
except:
pass
root.wm_state('normal')
#root.resizable(TRUE,TRUE)
# overwrite quitting behaviour, prompt to save settings
root.createcommand('exit', quitfunc)
root.mainloop()
if __name__ == "__main__":
# the traceback is mostly for debugging pyinstaller errors
import sys
import traceback
import os
lc = sys.argv[-1] if os.path.isdir(sys.argv[-1]) else False
#if lc and sys.argv[-1] == '.':
# lc = os.path.basename(os.getcwd())
# os.chdir('..')
debugmode = 'debug' in list(sys.argv)
def install(name, loc):
"""if we don't have a module, download it"""
try:
import importlib
importlib.import_module(name)
except ImportError:
import pip
pip.main(['install', loc])
tkintertablecode = ('tkintertable', 'git+https://github.com/interrogator/tkintertable.git')
pilcode = ('PIL', 'http://effbot.org/media/downloads/Imaging-1.1.7.tar.gz')
if not any(arg.lower() == 'noinstall' for arg in sys.argv):
install(*tkintertablecode)
from corpkit.constants import PYTHON_VERSION
if PYTHON_VERSION == 2:
install(*pilcode)
try:
if lc:
corpkit_gui(loadcurrent=lc, debug=debugmode)
else:
corpkit_gui(debug=debugmode)
except:
exc_type, exc_value, exc_traceback=sys.exc_info()
print("*** print_tb:")
print(traceback.print_tb(exc_traceback, limit=1, file=sys.stdout))
print("*** print_exception:")
print(traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout))
print("*** print_exc:")
print(traceback.print_exc())
print("*** format_exc, first and last line:")
formatted_lines = traceback.format_exc()
print(formatted_lines)
print("*** format_exception:")
print('\n'.join(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
print("*** extract_tb:")
print('\n'.join([str(i) for i in traceback.extract_tb(exc_traceback)]))
print("*** format_tb:")
print(traceback.format_tb(exc_traceback))
print("*** tb_lineno:", exc_traceback.tb_lineno)
| mit |
ephes/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
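# GridSearchCV scores each candidate bandwidth with KernelDensity.score, i.e.
# the total log-likelihood of the held-out fold under the fitted density, and
# keeps the bandwidth that maximises it (comment added for clarity; the
# cross-validation used here is the library's default k-fold splitting).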
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/datasets/samples_generator.py | 35 | 56035 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
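# A minimal usage sketch (illustrative addition, not part of the original
# module; the parameter values are arbitrary):
#
#     >>> X, y = make_classification(n_samples=200, n_features=10,
#     ...                            n_informative=4, n_redundant=2,
#     ...                            random_state=0)
#     >>> X.shape, y.shape
#     ((200, 10), (200,))
#     >>> set(y) == {0, 1}   # two classes by default
#     True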
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=True,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
    return_indicator : bool, optional (default=True)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
Y = MultiLabelBinarizer().fit([range(n_classes)]).transform(Y)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
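# A minimal usage sketch (illustrative addition, not part of the original
# module): with the default ``return_indicator=True`` the labels come back as
# a binary indicator matrix.
#
#     >>> X, Y = make_multilabel_classification(n_samples=50, n_features=20,
#     ...                                        n_classes=5, random_state=0)
#     >>> X.shape, Y.shape
#     ((50, 20), (50, 5))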
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
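# A minimal usage sketch (illustrative addition, not part of the original
# module): the target is the +/-1 indicator of the squared radius exceeding
# 9.34, the median of a chi-squared distribution with 10 degrees of freedom.
#
#     >>> X, y = make_hastie_10_2(n_samples=1000, random_state=0)
#     >>> X.shape, set(np.unique(y))
#     ((1000, 10), {-1.0, 1.0})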
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
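# A minimal usage sketch (illustrative addition, not part of the original
# module): with ``coef=True`` the ground-truth weights are returned, and only
# ``n_informative`` of them are non-zero.
#
#     >>> X, y, w = make_regression(n_samples=100, n_features=5,
#     ...                           n_informative=2, noise=1.0, coef=True,
#     ...                           random_state=0)
#     >>> X.shape, y.shape, w.shape
#     ((100, 5), (100,), (5,))
#     >>> int(np.sum(w != 0))
#     2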
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
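# A minimal usage sketch (illustrative addition, not part of the original
# module): label 0 marks the outer circle and label 1 the inner one.
#
#     >>> X, y = make_circles(n_samples=100, factor=0.5, noise=0.05,
#     ...                     random_state=0)
#     >>> X.shape, y.shape
#     ((100, 2), (100,))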
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
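# A minimal usage sketch (illustrative addition, not part of the original
# module; parameter values are arbitrary):
#
#     >>> X, y = make_moons(n_samples=120, noise=0.1, random_state=0)
#     >>> X.shape, y.shape
#     ((120, 2), (120,))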
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
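# A minimal usage sketch (illustrative addition, not part of the original
# module): only the first five columns of X enter the target formula above.
#
#     >>> X, y = make_friedman1(n_samples=100, n_features=10, noise=0.0,
#     ...                       random_state=0)
#     >>> X.shape, y.shape
#     ((100, 10), (100,))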
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (orthonormal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
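# Illustrative sketch (not part of the original module): the bulk of the
# spectral energy of the generated matrix should sit in roughly the first
# ``effective_rank`` singular values when the tail is weak.
def _example_low_rank_spectrum():
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    head_energy = np.sum(s[:5] ** 2) / np.sum(s ** 2)
    return head_energy  # fraction of spectral energy in the leading values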
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components : int
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
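# Illustrative sketch (not part of the original module): sanity checks on the
# generated signal -- every column of the code has exactly n_nonzero_coefs
# non-zeros, the dictionary atoms have unit norm, and Y factorizes as D X.
def _example_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=10, n_components=20,
                                       n_features=15, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.all((X != 0).sum(axis=0) == 3)
    assert np.allclose(np.sqrt((D ** 2).sum(axis=0)), 1.0)
    assert np.allclose(Y, np.dot(D, X))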
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
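# Illustrative sketch (not part of the original module): the generated matrix
# should be symmetric with strictly positive eigenvalues, so a Cholesky
# factorization succeeds.
def _example_spd_matrix_properties():
    A = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(A, A.T)
    np.linalg.cholesky(A)  # raises LinAlgError if A is not positive definite
    return A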
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
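# Illustrative sketch (not part of the original module): visualize a noisy
# Swiss roll, coloring the points by their position ``t`` along the manifold.
# ``matplotlib`` is assumed to be installed; it is not a dependency here.
def _example_plot_swiss_roll():
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, enables 3d axes
    X, t = make_swiss_roll(n_samples=500, noise=0.05, random_state=0)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=t, cmap=plt.cm.Spectral)
    plt.show()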
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al. [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
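# Illustrative sketch (not part of the original module): the quantile-based
# labelling puts an (approximately) equal number of samples in each class.
def _example_gaussian_quantile_balance():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    return np.bincount(y)  # expected: array([30, 30, 30])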
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
drammock/mne-python | examples/time_frequency/time_frequency_global_field_power.py | 10 | 5307 | """
.. _ex-time-freq-global-field-power:
===========================================================
Explore event-related dynamics for specific frequency bands
===========================================================
The objective is to show you how to explore spectrally localized
effects. For this purpose we adapt the method described in
:footcite:`HariSalmelin1997` and use it on the somato dataset.
The idea is to track the band-limited temporal evolution
of spatial patterns by using the :term:`global field power` (GFP).
We first bandpass filter the signals and then apply a Hilbert transform. To
reveal oscillatory activity the evoked response is then subtracted from every
single trial. Finally, we rectify the signals prior to averaging across trials
by taking the magnitude of the Hilbert-transformed signal.
Then the :term:`GFP` is computed as described in
:footcite:`EngemannGramfort2015`, using the sum of the
squares but without normalization by the rank.
Baselining is subsequently applied to make the :term:`GFP` comparable
between frequencies.
The procedure is then repeated for each frequency band of interest and
all :term:`GFPs<GFP>` are visualized. To estimate uncertainty, non-parametric
confidence intervals are computed as described in :footcite:`EfronHastie2016`
across channels.
The advantage of this method over summarizing the Space x Time x Frequency
output of a Morlet Wavelet in frequency bands is relative speed and, more
importantly, the clear-cut comparability of the spectral decomposition (the
same type of filter is used across all bands).
We will use this dataset: :ref:`somato-dataset`
References
----------
.. footbibliography::
""" # noqa: E501
# Authors: Denis A. Engemann <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import somato
from mne.baseline import rescale
from mne.stats import bootstrap_confidence_interval
###############################################################################
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# let's explore some frequency bands
iter_freqs = [
('Theta', 4, 7),
('Alpha', 8, 12),
('Beta', 13, 25),
('Gamma', 30, 45)
]
###############################################################################
# We create average power time courses for each frequency band
# set epoching parameters
event_id, tmin, tmax = 1, -1., 3.
baseline = None
# get the header to extract events
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
frequency_map = list()
for band, fmin, fmax in iter_freqs:
# (re)load the data to save memory
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg='grad', eog=True) # we just look at gradiometers
raw.load_data()
# bandpass filter
raw.filter(fmin, fmax, n_jobs=1, # use more jobs to speed up.
l_trans_bandwidth=1, # make sure filter params are the same
h_trans_bandwidth=1) # in each band and skip "auto" option.
# epoch
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=baseline,
reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
# remove evoked response
epochs.subtract_evoked()
# get analytic signal (envelope)
epochs.apply_hilbert(envelope=True)
frequency_map.append(((band, fmin, fmax), epochs.average()))
del epochs
del raw
###############################################################################
# Now we can compute the Global Field Power
# We can track the emergence of spatial patterns compared to baseline
# for each frequency band, with a bootstrapped confidence interval.
#
# We see dominant responses in the Alpha and Beta bands.
# Helper function for plotting spread
def stat_fun(x):
"""Return sum of squares."""
return np.sum(x ** 2, axis=0)
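# Illustrative sketch (not part of the original example): the same GFP recipe
# applied to an arbitrary (n_channels, n_times) array -- sum of squares over
# channels followed by baseline rescaling, so the bands become comparable.
def _gfp_sketch(data, times):
    gfp = np.sum(data ** 2, axis=0)  # sum of squares across channels
    return rescale(gfp, times, baseline=(None, 0))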
# Plot
fig, axes = plt.subplots(4, 1, figsize=(10, 7), sharex=True, sharey=True)
colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 4))
for ((freq_name, fmin, fmax), average), color, ax in zip(
frequency_map, colors, axes.ravel()[::-1]):
times = average.times * 1e3
gfp = np.sum(average.data ** 2, axis=0)
gfp = mne.baseline.rescale(gfp, times, baseline=(None, 0))
ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)
ax.axhline(0, linestyle='--', color='grey', linewidth=2)
ci_low, ci_up = bootstrap_confidence_interval(average.data, random_state=0,
stat_fun=stat_fun)
ci_low = rescale(ci_low, average.times, baseline=(None, 0))
ci_up = rescale(ci_up, average.times, baseline=(None, 0))
ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)
ax.grid(True)
ax.set_ylabel('GFP')
ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),
xy=(0.95, 0.8),
horizontalalignment='right',
xycoords='axes fraction')
ax.set_xlim(-1000, 3000)
axes.ravel()[-1].set_xlabel('Time [ms]')
| bsd-3-clause |
tombstone/models | research/a3c_blogpost/a3c_cartpole.py | 1 | 12387 | import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import threading
import gym
import multiprocessing
import numpy as np
from queue import Queue
import argparse
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import layers
parser = argparse.ArgumentParser(description='Run A3C algorithm on the game '
'Cartpole.')
parser.add_argument('--algorithm', default='a3c', type=str,
help='Choose between \'a3c\' and \'random\'.')
parser.add_argument('--train', dest='train', action='store_true',
help='Train our model.')
parser.add_argument('--lr', default=0.001,
help='Learning rate for the shared optimizer.')
parser.add_argument('--update-freq', default=20, type=int,
help='How often to update the global model.')
parser.add_argument('--max-eps', default=1000, type=int,
help='Global maximum number of episodes to run.')
parser.add_argument('--gamma', default=0.99,
help='Discount factor of rewards.')
parser.add_argument('--save-dir', default='/tmp/', type=str,
help='Directory in which you desire to save the model.')
args = parser.parse_args()
class ActorCriticModel(keras.Model):
def __init__(self, state_size, action_size):
super(ActorCriticModel, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.dense1 = layers.Dense(100, activation='relu')
self.policy_logits = layers.Dense(action_size)
self.dense2 = layers.Dense(100, activation='relu')
self.values = layers.Dense(1)
def call(self, inputs):
# Forward pass
x = self.dense1(inputs)
logits = self.policy_logits(x)
v1 = self.dense2(inputs)
values = self.values(v1)
return logits, values
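# Illustrative sketch (not part of the original script): a single forward pass
# through the actor-critic network with CartPole-sized inputs (state_size=4,
# action_size=2), mirroring how MasterAgent warms up the global model.
def _example_forward_pass():
    model = ActorCriticModel(state_size=4, action_size=2)
    dummy_state = tf.convert_to_tensor(np.random.random((1, 4)),
                                       dtype=tf.float32)
    logits, values = model(dummy_state)  # policy logits and state value
    return logits, values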
def record(episode,
episode_reward,
worker_idx,
global_ep_reward,
result_queue,
total_loss,
num_steps):
"""Helper function to store score and print statistics.
Arguments:
episode: Current episode
episode_reward: Reward accumulated over the current episode
worker_idx: Which thread (worker)
global_ep_reward: The moving average of the global reward
result_queue: Queue storing the moving average of the scores
total_loss: The total loss accumulated over the current episode
num_steps: The number of steps the episode took to complete
"""
if global_ep_reward == 0:
global_ep_reward = episode_reward
else:
global_ep_reward = global_ep_reward * 0.99 + episode_reward * 0.01
print(
f"Episode: {episode} | "
f"Moving Average Reward: {int(global_ep_reward)} | "
f"Episode Reward: {int(episode_reward)} | "
f"Loss: {int(total_loss / float(num_steps) * 1000) / 1000} | "
f"Steps: {num_steps} | "
f"Worker: {worker_idx}"
)
result_queue.put(global_ep_reward)
return global_ep_reward
class RandomAgent:
"""Random Agent that will play the specified game
Arguments:
env_name: Name of the environment to be played
max_eps: Maximum number of episodes to run agent for.
"""
def __init__(self, env_name, max_eps):
self.env = gym.make(env_name)
self.max_episodes = max_eps
self.global_moving_average_reward = 0
self.res_queue = Queue()
def run(self):
reward_avg = 0
for episode in range(self.max_episodes):
done = False
self.env.reset()
reward_sum = 0.0
steps = 0
while not done:
# Sample randomly from the action space and step
_, reward, done, _ = self.env.step(self.env.action_space.sample())
steps += 1
reward_sum += reward
# Record statistics
self.global_moving_average_reward = record(episode,
reward_sum,
0,
self.global_moving_average_reward,
self.res_queue, 0, steps)
reward_avg += reward_sum
final_avg = reward_avg / float(self.max_episodes)
print("Average score across {} episodes: {}".format(self.max_episodes, final_avg))
return final_avg
class MasterAgent():
def __init__(self):
self.game_name = 'CartPole-v0'
save_dir = args.save_dir
self.save_dir = save_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
env = gym.make(self.game_name)
self.state_size = env.observation_space.shape[0]
self.action_size = env.action_space.n
self.opt = tf.compat.v1.train.AdamOptimizer(args.lr, use_locking=True)
print(self.state_size, self.action_size)
self.global_model = ActorCriticModel(self.state_size, self.action_size) # global network
self.global_model(tf.convert_to_tensor(np.random.random((1, self.state_size)), dtype=tf.float32))
def train(self):
if args.algorithm == 'random':
random_agent = RandomAgent(self.game_name, args.max_eps)
random_agent.run()
return
res_queue = Queue()
workers = [Worker(self.state_size,
self.action_size,
self.global_model,
self.opt, res_queue,
i, game_name=self.game_name,
save_dir=self.save_dir) for i in range(multiprocessing.cpu_count())]
for i, worker in enumerate(workers):
print("Starting worker {}".format(i))
worker.start()
moving_average_rewards = [] # record episode reward to plot
while True:
reward = res_queue.get()
if reward is not None:
moving_average_rewards.append(reward)
else:
break
[w.join() for w in workers]
plt.plot(moving_average_rewards)
plt.ylabel('Moving average ep reward')
plt.xlabel('Step')
plt.savefig(os.path.join(self.save_dir,
'{} Moving Average.png'.format(self.game_name)))
plt.show()
def play(self):
env = gym.make(self.game_name).unwrapped
state = env.reset()
model = self.global_model
model_path = os.path.join(self.save_dir, 'model_{}.h5'.format(self.game_name))
print('Loading model from: {}'.format(model_path))
model.load_weights(model_path)
done = False
step_counter = 0
reward_sum = 0
try:
while not done:
env.render(mode='rgb_array')
policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32))
policy = tf.nn.softmax(policy)
action = np.argmax(policy)
state, reward, done, _ = env.step(action)
reward_sum += reward
print("{}. Reward: {}, action: {}".format(step_counter, reward_sum, action))
step_counter += 1
except KeyboardInterrupt:
print("Received Keyboard Interrupt. Shutting down.")
finally:
env.close()
class Memory:
def __init__(self):
self.states = []
self.actions = []
self.rewards = []
def store(self, state, action, reward):
self.states.append(state)
self.actions.append(action)
self.rewards.append(reward)
def clear(self):
self.states = []
self.actions = []
self.rewards = []
class Worker(threading.Thread):
# Set up global variables across different threads
global_episode = 0
# Moving average reward
global_moving_average_reward = 0
best_score = 0
save_lock = threading.Lock()
def __init__(self,
state_size,
action_size,
global_model,
opt,
result_queue,
idx,
game_name='CartPole-v0',
save_dir='/tmp'):
super(Worker, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.result_queue = result_queue
self.global_model = global_model
self.opt = opt
self.local_model = ActorCriticModel(self.state_size, self.action_size)
self.worker_idx = idx
self.game_name = game_name
self.env = gym.make(self.game_name).unwrapped
self.save_dir = save_dir
self.ep_loss = 0.0
def run(self):
total_step = 1
mem = Memory()
while Worker.global_episode < args.max_eps:
current_state = self.env.reset()
mem.clear()
ep_reward = 0.
ep_steps = 0
self.ep_loss = 0
time_count = 0
done = False
while not done:
logits, _ = self.local_model(
tf.convert_to_tensor(current_state[None, :],
dtype=tf.float32))
probs = tf.nn.softmax(logits)
action = np.random.choice(self.action_size, p=probs.numpy()[0])
new_state, reward, done, _ = self.env.step(action)
if done:
reward = -1
ep_reward += reward
mem.store(current_state, action, reward)
if time_count == args.update_freq or done:
# Calculate gradients with respect to the local model. We do so by
# tracking the variables involved in computing the loss using tf.GradientTape
with tf.GradientTape() as tape:
total_loss = self.compute_loss(done,
new_state,
mem,
args.gamma)
self.ep_loss += total_loss
# Calculate local gradients
grads = tape.gradient(total_loss, self.local_model.trainable_weights)
# Push local gradients to global model
self.opt.apply_gradients(zip(grads,
self.global_model.trainable_weights))
# Update local model with new weights
self.local_model.set_weights(self.global_model.get_weights())
mem.clear()
time_count = 0
if done: # done and print information
Worker.global_moving_average_reward = \
record(Worker.global_episode, ep_reward, self.worker_idx,
Worker.global_moving_average_reward, self.result_queue,
self.ep_loss, ep_steps)
# We must use a lock to save our model and to print to prevent data races.
if ep_reward > Worker.best_score:
with Worker.save_lock:
print("Saving best model to {}, "
"episode score: {}".format(self.save_dir, ep_reward))
self.global_model.save_weights(
os.path.join(self.save_dir,
'model_{}.h5'.format(self.game_name))
)
Worker.best_score = ep_reward
Worker.global_episode += 1
ep_steps += 1
time_count += 1
current_state = new_state
total_step += 1
self.result_queue.put(None)
def compute_loss(self,
done,
new_state,
memory,
gamma=0.99):
if done:
reward_sum = 0. # terminal
else:
reward_sum = self.local_model(
tf.convert_to_tensor(new_state[None, :],
dtype=tf.float32))[-1].numpy()[0]
# Get discounted rewards
discounted_rewards = []
for reward in memory.rewards[::-1]: # reverse buffer r
reward_sum = reward + gamma * reward_sum
discounted_rewards.append(reward_sum)
discounted_rewards.reverse()
logits, values = self.local_model(
tf.convert_to_tensor(np.vstack(memory.states),
dtype=tf.float32))
# Get our advantages
advantage = tf.convert_to_tensor(np.array(discounted_rewards)[:, None],
dtype=tf.float32) - values
# Value loss
value_loss = advantage ** 2
# Calculate our policy loss
policy = tf.nn.softmax(logits)
entropy = tf.nn.softmax_cross_entropy_with_logits(labels=policy, logits=logits)
policy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=memory.actions,
logits=logits)
policy_loss *= tf.stop_gradient(advantage)
policy_loss -= 0.01 * entropy
total_loss = tf.reduce_mean((0.5 * value_loss + policy_loss))
return total_loss
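# Illustrative sketch (not part of the original script): the same backward
# discounting recursion used in compute_loss above, on a plain Python list of
# rewards with an optional bootstrap value for the final state.
def _example_discounted_returns(rewards=(1.0, 0.0, 2.0), gamma=0.99,
                                bootstrap=0.0):
    running = bootstrap
    discounted = []
    for reward in reversed(rewards):
        running = reward + gamma * running
        discounted.append(running)
    discounted.reverse()
    return discounted  # e.g. [2.9602, 1.98, 2.0] for the defaults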
if __name__ == '__main__':
print(args)
master = MasterAgent()
if args.train:
master.train()
else:
master.play()
| apache-2.0 |
sgenoud/scikit-learn | sklearn/feature_extraction/text.py | 1 | 28251 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
#
# License: BSD Style.
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
import re
import unicodedata
from operator import itemgetter
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import normalize
from ..utils.fixes import Counter
from .stop_words import ENGLISH_STOP_WORDS
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return u''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
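# Illustrative sketch (not part of the original module): both helpers map the
# accented string u"caf\xe9" ("café") to u"cafe"; the unicode variant also
# copes with symbols that have no direct ASCII transliteration.
def _example_strip_accents():
    return (strip_accents_unicode(u"caf\xe9"),
            strip_accents_ascii(u"caf\xe9"))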
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(ur"<([^>]+)>", flags=re.UNICODE).sub(u" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str) or isinstance(stop, unicode):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class CountVectorizer(BaseEstimator):
"""Convert a collection of raw documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analysing the data. The default
analyzer does simple stop word filtering for English.
Parameters
----------
input: string {'filename', 'file', 'content'}
If filename, the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of strings or bytes
items that are analyzed directly.
charset: string, 'utf-8' by default.
If bytes or files are given to analyze, this charset is used to
decode.
charset_error: {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `charset`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
min_n: integer
The lower boundary of the range of n-values for different n-grams to be
extracted.
max_n: integer
The upper boundary of the range of n-values for different n-grams to be
extracted. All values of n such that min_n <= n <= max_n will be used.
stop_words: string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only
supported string value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `tokenize == 'word'`. The default regexp select tokens of 2
or more letters characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0], optional, 1.0 by default
When building the vocabulary ignore terms that have a term frequency
strictly higher than the given threshold (corpus specific stop words).
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
"""
_white_spaces = re.compile(ur"\s\s+")
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=ur"\b\w\w+\b",
min_n=1, max_n=1, analyzer='word',
max_df=1.0, max_features=None,
vocabulary=None, binary=False, dtype=long):
self.input = input
self.charset = charset
self.charset_error = charset_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.min_n = min_n
self.max_n = max_n
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.max_features = max_features
if vocabulary is not None:
self.fixed_vocabulary = True
if not hasattr(vocabulary, 'get'):
vocabulary = dict((t, i) for i, t in enumerate(vocabulary))
self.vocabulary_ = vocabulary
else:
self.fixed_vocabulary = False
self.binary = binary
self.dtype = dtype
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
doc = open(doc, 'rb').read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.charset, self.charset_error)
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
if self.min_n != 1 or self.max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(self.min_n,
min(self.max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(u" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(u" ", text_document)
text_len = len(text_document)
ngrams = []
for n in xrange(self.min_n, min(self.max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif hasattr(self.strip_accents, '__call__'):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that split a string in sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if hasattr(self.analyzer, '__call__'):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme' %
self.analyzer)
def _term_count_dicts_to_matrix(self, term_count_dicts):
i_indices = []
j_indices = []
values = []
vocabulary = self.vocabulary_
for i, term_count_dict in enumerate(term_count_dicts):
for term, count in term_count_dict.iteritems():
j = vocabulary.get(term)
if j is not None:
i_indices.append(i)
j_indices.append(j)
values.append(count)
# free memory as we go
term_count_dict.clear()
shape = (len(term_count_dicts), max(vocabulary.itervalues()) + 1)
spmatrix = sp.coo_matrix((values, (i_indices, j_indices)),
shape=shape, dtype=self.dtype)
if self.binary:
spmatrix.data[:] = 1
return spmatrix
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return the count vectors
This is more efficient than calling fit followed by transform.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: array, [n_samples, n_features]
"""
if self.fixed_vocabulary:
# No need to fit anything, directly perform the transformation.
# We intentionally don't call the transform method to make it
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc))
for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
self.vocabulary_ = {}
# result of document conversion to term count dicts
term_counts_per_doc = []
term_counts = Counter()
# term counts across entire corpus (count each term maximum once per
# document)
document_counts = Counter()
max_df = self.max_df
max_features = self.max_features
analyze = self.build_analyzer()
# TODO: parallelize the following loop with joblib?
# (see XXX up ahead)
for doc in raw_documents:
term_count_current = Counter(analyze(doc))
term_counts.update(term_count_current)
if max_df < 1.0:
document_counts.update(term_count_current.iterkeys())
term_counts_per_doc.append(term_count_current)
n_doc = len(term_counts_per_doc)
# filter out stop words: terms that occur in almost all documents
if max_df < 1.0:
max_document_count = max_df * n_doc
stop_words = set(t for t, dc in document_counts.iteritems()
if dc > max_document_count)
else:
stop_words = set()
# list the terms that should be part of the vocabulary
if max_features is None:
terms = set(term_counts) - stop_words
else:
# extract the most frequent terms for the vocabulary
terms = set()
for t, tc in term_counts.most_common():
if t not in stop_words:
terms.add(t)
if len(terms) >= max_features:
break
# store the learned stop words to make it easier to debug the value of
# max_df
self.max_df_stop_words_ = stop_words
# store map from term name to feature integer index: we sort the term
# to have reproducible outcome for the vocabulary structure: otherwise
# the mapping from feature name to indices might depend on the memory
# layout of the machine. Furthermore sorted terms might make it
# possible to perform binary search in the feature names array.
self.vocabulary_ = dict(((t, i) for i, t in enumerate(sorted(terms))))
# the term_counts and document_counts might be useful statistics, are
# we really sure we want to drop them? They take some memory but
# can be useful for corpus introspection
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def transform(self, raw_documents):
"""Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided in the constructor.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
# raw_documents can be an iterable so we don't know its size in
# advance
# XXX @larsmans tried to parallelize the following loop with joblib.
# The result was some 20% slower than the serial version.
analyze = self.build_analyzer()
term_counts_per_doc = [Counter(analyze(doc)) for doc in raw_documents]
return self._term_count_dicts_to_matrix(term_counts_per_doc)
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
if sp.isspmatrix_coo(X): # COO matrix is not indexable
X = X.tocsr()
elif not sp.issparse(X):
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(self.vocabulary_.keys())
indices = np.array(self.vocabulary_.values())
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in xrange(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0:
raise ValueError("Vocabulary wasn't fitted or is empty!")
return [t for t, i in sorted(self.vocabulary_.iteritems(),
key=itemgetter(1))]
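# Illustrative usage sketch (not part of the original module): vectorize two
# toy documents and inspect the learned vocabulary and the count matrix.
def _example_count_vectorizer():
    docs = [u"the cat sat on the mat", u"the dog sat"]
    vectorizer = CountVectorizer()
    counts = vectorizer.fit_transform(docs)  # sparse term-count matrix
    return vectorizer.get_feature_names(), counts.toarray()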
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf–idf representation
Tf means term-frequency while tf–idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf–idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
In the SMART notation used in IR, this class implements several tf–idf
variants. Tf is always "n" (natural), idf is "t" iff use_idf is given,
"n" otherwise, and normalization is "c" iff norm='l2', "n" iff norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68–74.`
.. [MSR2008] `C.D. Manning, H. Schütze and P. Raghavan (2008). Introduction
to Information Retrieval. Cambridge University Press,
pp. 121–125.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
self.idf_ = None
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X: sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if self.use_idf:
if not hasattr(X, 'nonzero'):
X = sp.csr_matrix(X)
n_samples, n_features = X.shape
df = np.bincount(X.nonzero()[1])
if df.shape[0] < n_features:
# bincount might return fewer bins than there are features
df = np.concatenate([df, np.zeros(n_features - df.shape[0])])
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# avoid division by zeros for features that occur in all documents
self.idf_ = np.log(float(n_samples) / df) + 1.0
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf–idf representation
Parameters
----------
X: sparse matrix, [n_samples, n_features]
a matrix of term/token counts
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
expected_n_features = self.idf_.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
d = sp.lil_matrix((n_features, n_features))
d.setdiag(self.idf_)
# *= doesn't work
X = X * d
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
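# Illustrative sketch (not part of the original module): the smoothed idf
# weights that TfidfTransformer.fit derives (with the default
# smooth_idf=True) from a small count matrix, spelled out with plain numpy
# to make the formula explicit.
def _example_smoothed_idf():
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0]])
    n_samples = counts.shape[0]
    df = (counts > 0).sum(axis=0)  # document frequency of each term
    idf = np.log(float(n_samples + 1) / (df + 1)) + 1.0  # smoothed idf
    return idf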
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
"""
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
analyzer='word', stop_words=None, token_pattern=ur"\b\w\w+\b",
min_n=1, max_n=1, max_df=1.0, max_features=None,
vocabulary=None, binary=False, dtype=long, norm='l2',
use_idf=True, smooth_idf=True, sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, charset=charset, charset_error=charset_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern, min_n=min_n,
max_n=max_n, max_df=max_df, max_features=max_features,
vocabulary=vocabulary, binary=False, dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
def fit(self, raw_documents):
"""Learn a conversion law from documents to array data"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the representation and return the vectors.
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: array, [n_samples, n_features]
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform raw text documents to tf–idf vectors
Parameters
----------
raw_documents: iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors: sparse matrix, [n_samples, n_features]
"""
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy)
class Vectorizer(TfidfVectorizer):
"""Vectorizer is eprecated in 0.11, use TfidfVectorizer instead"""
def __init__(self, input='content', charset='utf-8',
charset_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
analyzer='word', stop_words=None, token_pattern=ur"\b\w\w+\b",
min_n=1, max_n=1, max_df=1.0, max_features=None,
vocabulary=None, binary=False, dtype=long, norm='l2',
use_idf=True, smooth_idf=True, sublinear_tf=False):
warnings.warn("Vectorizer is deprecated in 0.11 and will be removed"
" in 0.13. Please use TfidfVectorizer instead.",
category=DeprecationWarning)
super(Vectorizer, self).__init__(
input=input, charset=charset, charset_error=charset_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern, min_n=min_n,
max_n=max_n, max_df=max_df, max_features=max_features,
vocabulary=vocabulary, binary=False, dtype=dtype,
norm=norm, use_idf=use_idf, smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
| bsd-3-clause |
architecture-building-systems/CityEnergyAnalyst | cea/resources/radiation_daysim/daysim_main.py | 1 | 13038 | import json
import os
import numpy as np
import pandas as pd
import py4design.py2radiance as py2radiance
import py4design.py3dmodel.calculate as calculate
from py4design import py3dmodel
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca", "Kian Wee Chen"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "[email protected]"
__status__ = "Production"
from cea.constants import HOURS_IN_YEAR
from cea.resources.radiation_daysim.geometry_generator import BuildingGeometry
from cea import suppress_3rd_party_debug_loggers
suppress_3rd_party_debug_loggers()
def create_sensor_input_file(rad, chunk_n):
sensor_file_path = os.path.join(rad.data_folder_path, "points_" + str(chunk_n) + ".pts")
sensor_file = open(sensor_file_path, "w")
sensor_pts_data = py2radiance.write_rad.sensor_file(rad.sensor_positions, rad.sensor_normals)
sensor_file.write(sensor_pts_data)
sensor_file.close()
rad.sensor_file_path = sensor_file_path
def generate_sensor_surfaces(occface, wall_dim, roof_dim, srf_type, orientation, normal, intersection):
mid_pt = py3dmodel.calculate.face_midpt(occface)
location_pt = py3dmodel.modify.move_pt(mid_pt, normal, 0.01)
moved_oface = py3dmodel.fetch.topo2topotype(py3dmodel.modify.move(mid_pt, location_pt, occface))
if srf_type == 'roofs':
xdim = ydim = roof_dim
else:
xdim = ydim = wall_dim
# put it into occ and subdivide surfaces
sensor_surfaces = py3dmodel.construct.grid_face(moved_oface, xdim, ydim)
# calculate list of properties per surface
sensor_intersection = [intersection for x in sensor_surfaces]
sensor_dir = [normal for x in sensor_surfaces]
sensor_cord = [py3dmodel.calculate.face_midpt(x) for x in sensor_surfaces]
sensor_type = [srf_type for x in sensor_surfaces]
sensor_orientation = [orientation for x in sensor_surfaces]
sensor_area = [calculate.face_area(x) * (1.0 - scalar) for x, scalar in zip(sensor_surfaces, sensor_intersection)]
return sensor_dir, sensor_cord, sensor_type, sensor_area, sensor_orientation, sensor_intersection
def calc_sensors_building(building_geometry, grid_size):
sensor_dir_list = []
sensor_cord_list = []
sensor_type_list = []
sensor_area_list = []
sensor_orientation_list = []
sensor_intersection_list = []
surfaces_types = ['walls', 'windows', 'roofs']
sensor_vertical_grid_dim = grid_size["walls_grid"]
sensor_horizontal_grid_dim = grid_size["roof_grid"]
for srf_type in surfaces_types:
occface_list = getattr(building_geometry, srf_type)
if srf_type == 'roofs':
orientation_list = ['top'] * len(occface_list)
normals_list = [(0.0, 0.0, 1.0)] * len(occface_list)
interesection_list = [0] * len(occface_list)
elif srf_type == 'windows':
orientation_list = getattr(building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
normals_list = getattr(building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
interesection_list = [0] * len(occface_list)
else:
orientation_list = getattr(building_geometry, "orientation_{srf_type}".format(srf_type=srf_type))
normals_list = getattr(building_geometry, "normals_{srf_type}".format(srf_type=srf_type))
interesection_list = getattr(building_geometry, "intersect_{srf_type}".format(srf_type=srf_type))
for orientation, normal, face, intersection in zip(orientation_list, normals_list, occface_list,
interesection_list):
sensor_dir, \
sensor_cord, \
sensor_type, \
sensor_area, \
sensor_orientation, \
sensor_intersection = generate_sensor_surfaces(face,
sensor_vertical_grid_dim,
sensor_horizontal_grid_dim,
srf_type,
orientation,
normal,
intersection)
sensor_intersection_list.extend(sensor_intersection)
sensor_dir_list.extend(sensor_dir)
sensor_cord_list.extend(sensor_cord)
sensor_type_list.extend(sensor_type)
sensor_area_list.extend(sensor_area)
sensor_orientation_list.extend(sensor_orientation)
return sensor_dir_list, sensor_cord_list, sensor_type_list, sensor_area_list, sensor_orientation_list, sensor_intersection_list
def calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir):
sensors_coords_zone = []
sensors_dir_zone = []
sensors_total_number_list = []
names_zone = []
sensors_code_zone = []
sensor_intersection_zone = []
for building_name in building_names:
building_geometry = BuildingGeometry.load(os.path.join(geometry_pickle_dir, 'zone', building_name))
# get sensors in the building
sensors_dir_building, \
sensors_coords_building, \
sensors_type_building, \
sensors_area_building, \
sensor_orientation_building, \
sensor_intersection_building = calc_sensors_building(building_geometry, grid_size)
# get the total number of sensors and store in lst
sensors_number = len(sensors_coords_building)
sensors_total_number_list.append(sensors_number)
sensors_code = ['srf' + str(x) for x in range(sensors_number)]
sensors_code_zone.append(sensors_code)
# get the total list of coordinates and directions to send to daysim
sensors_coords_zone.extend(sensors_coords_building)
sensors_dir_zone.extend(sensors_dir_building)
# get total list of intersections
sensor_intersection_zone.append(sensor_intersection_building)
# get the name of all buildings
names_zone.append(building_name)
# save sensors geometry result to disk
pd.DataFrame({'BUILDING': building_name,
'SURFACE': sensors_code,
'orientation': sensor_orientation_building,
'intersection': sensor_intersection_building,
'Xcoor': [x[0] for x in sensors_coords_building],
'Ycoor': [x[1] for x in sensors_coords_building],
'Zcoor': [x[2] for x in sensors_coords_building],
'Xdir': [x[0] for x in sensors_dir_building],
'Ydir': [x[1] for x in sensors_dir_building],
'Zdir': [x[2] for x in sensors_dir_building],
'AREA_m2': sensors_area_building,
'TYPE': sensors_type_building}).to_csv(locator.get_radiation_metadata(building_name), index=None)
return sensors_coords_zone, sensors_dir_zone, sensors_total_number_list, names_zone, sensors_code_zone, sensor_intersection_zone
def isolation_daysim(chunk_n, cea_daysim, building_names, locator, radiance_parameters, write_sensor_data, grid_size,
max_global, weatherfile, geometry_pickle_dir):
# initialize daysim project
daysim_project = cea_daysim.initialize_daysim_project('chunk_{n}'.format(n=chunk_n))
print('Creating daysim project in: {daysim_dir}'.format(daysim_dir=daysim_project.project_path))
# calculate sensors
print("Calculating and sending sensor points")
sensors_coords_zone, \
sensors_dir_zone, \
sensors_number_zone, \
names_zone, \
sensors_code_zone, \
sensor_intersection_zone = calc_sensors_zone(building_names, locator, grid_size, geometry_pickle_dir)
num_sensors = sum(sensors_number_zone)
daysim_project.create_sensor_input_file(sensors_coords_zone, sensors_dir_zone, num_sensors, "w/m2")
print("Starting Daysim simulation for buildings: {buildings}".format(buildings=names_zone))
print("Total number of sensors: {num_sensors}".format(num_sensors=num_sensors))
print('Writing radiance parameters')
daysim_project.write_radiance_parameters(radiance_parameters["rad_ab"], radiance_parameters["rad_ad"],
radiance_parameters["rad_as"], radiance_parameters["rad_ar"],
radiance_parameters["rad_aa"], radiance_parameters["rad_lr"],
radiance_parameters["rad_st"], radiance_parameters["rad_sj"],
radiance_parameters["rad_lw"], radiance_parameters["rad_dj"],
radiance_parameters["rad_ds"], radiance_parameters["rad_dr"],
radiance_parameters["rad_dp"])
    print('Executing hourly solar insolation calculation')
daysim_project.execute_gen_dc()
daysim_project.execute_ds_illum()
print('Reading results...')
solar_res = daysim_project.eval_ill()
# check inconsistencies and replace by max value of weather file
print('Fixing inconsistencies, if any')
solar_res = np.clip(solar_res, a_min=0.0, a_max=max_global)
# Check if leap year and remove extra day
if solar_res.shape[1] == HOURS_IN_YEAR + 24:
print('Removing leap day')
leap_day_hours = range(1416, 1440)
solar_res = np.delete(solar_res, leap_day_hours, axis=1)
print("Writing results to disk")
index = 0
for building_name, \
sensors_number_building, \
sensor_code_building, \
sensor_intersection_building in zip(names_zone,
sensors_number_zone,
sensors_code_zone,
sensor_intersection_zone):
# select sensors data
selection_of_results = solar_res[index:index + sensors_number_building]
selection_of_results[np.array(sensor_intersection_building) == 1] = 0
items_sensor_name_and_result = dict(zip(sensor_code_building, selection_of_results.tolist()))
index = index + sensors_number_building
# create summary and save to disk
write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile)
if write_sensor_data:
write_sensor_results(building_name, items_sensor_name_and_result, locator)
# erase daysim folder to avoid conflicts after every iteration
print('Removing results folder')
daysim_project.cleanup_project()
def write_sensor_results(building_name, items_sensor_name_and_result, locator):
with open(locator.get_radiation_building_sensors(building_name), 'w') as outfile:
json.dump(items_sensor_name_and_result, outfile)
def write_aggregated_results(building_name, items_sensor_name_and_result, locator, weatherfile):
geometry = pd.read_csv(locator.get_radiation_metadata(building_name))
geometry['code'] = geometry['TYPE'] + '_' + geometry['orientation'] + '_kW'
solar_analysis_fields = ['windows_east_kW',
'windows_west_kW',
'windows_south_kW',
'windows_north_kW',
'walls_east_kW',
'walls_west_kW',
'walls_south_kW',
'walls_north_kW',
'roofs_top_kW']
solar_analysis_fields_area = ['windows_east_m2',
'windows_west_m2',
'windows_south_m2',
'windows_north_m2',
'walls_east_m2',
'walls_west_m2',
'walls_south_m2',
'walls_north_m2',
'roofs_top_m2']
dict_not_aggregated = {}
for field, field_area in zip(solar_analysis_fields, solar_analysis_fields_area):
select_sensors = geometry.loc[geometry['code'] == field].set_index('SURFACE')
area_m2 = select_sensors['AREA_m2'].sum()
array_field = np.array([select_sensors.loc[surface, 'AREA_m2'] *
np.array(items_sensor_name_and_result[surface])
for surface in select_sensors.index]).sum(axis=0)
dict_not_aggregated[field] = array_field / 1000 # in kWh
dict_not_aggregated[field_area] = area_m2
data_aggregated_kW = (pd.DataFrame(dict_not_aggregated)).round(2)
data_aggregated_kW["Date"] = weatherfile["date"]
data_aggregated_kW.set_index('Date', inplace=True)
data_aggregated_kW.to_csv(locator.get_radiation_building(building_name))
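# Illustrative sketch, not part of the original module: the area-weighted
# aggregation performed in write_aggregated_results, reduced to a toy case.
# Surface names, areas and the two hourly irradiance values are invented.
def _aggregation_example():
    geometry = pd.DataFrame({'SURFACE': ['srf0', 'srf1'],
                             'AREA_m2': [2.0, 3.0],
                             'code': ['walls_east_kW', 'walls_east_kW']}).set_index('SURFACE')
    # per-sensor irradiance in W/m2 for two hours
    results = {'srf0': [100.0, 200.0], 'srf1': [50.0, 150.0]}
    select_sensors = geometry.loc[geometry['code'] == 'walls_east_kW']
    # weight each sensor's hourly series by its area, then sum over sensors
    array_field = np.array([select_sensors.loc[surface, 'AREA_m2'] *
                            np.array(results[surface])
                            for surface in select_sensors.index]).sum(axis=0)
    return array_field / 1000  # hourly totals for this orientation in kW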
| mit |
ggianna/PyINSECT | source/documentModel/representations/DocumentNGramGaussNormGraph.py | 1 | 2228 | """
DocumentNGramSymWinGaussGraph.py
Created on May 23, 2017, 5:56 PM
"""
import networkx as nx
import pygraphviz as pgv
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
from DocumentNGramGraph import DocumentNGramGraph
import math
class DocumentNGramGaussNormGraph(DocumentNGramGraph):
# an extension of DocumentNGramGraph
# for symmetric windowing
_sigma = 1
_mean = 0
_a = 1/math.sqrt(2*math.pi)
_b = 2
def buildGraph(self,verbose = False, d=[]):
# set Data @class_variable
self.setData(d)
Data = self._Data
# build ngram
ng = self.build_ngram()
s = len(ng)
# calculate window
win = (3*self._Dwin)//2
# calculate gaussian params
self.set_dsf(self._Dwin//2,0)
# initialize graph
self._Graph = nx.Graph()
self._edges = set()
if(s>=2 and self._Dwin>=1):
# max possible window size (bounded by win)
o = min(win,s)+1
window = ng[1:o]
i = o
# first build the full window
for gram in ng[0:s-1]:
j = 1
for w in window:
# weigh in the correct way
self.addEdgeInc(gram,w,float(format(self.pdf(j),'.2f')))
j+=1
window.pop(0)
                # if the window's edge has reached the end of ng,
                # stop appending
if i<s:
window.append(ng[i][:])
i+=1
if verbose:
self.GraphDraw(self._GPrintVerbose)
return self._Graph
# sets mean, sigma to support
# multiple pdf function calls
# without the need of recalculations
def set_dsf(self,sigma=1,mean=0):
self._sigma = sigma
self._mean = mean
self._a = 1.0/(sigma * math.sqrt(2*math.pi))
self._b = 2.0*(sigma**2)
print self._a
print self._b
    # calculates the weight for a given distance x, using the constants
    # precomputed in set_dsf
def pdf(self,x):
return self._a*math.exp(-(x*1.0)/self._b)
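# Illustrative sketch, not part of the original module: the distance weighting
# implemented by set_dsf/pdf, written out standalone. sigma=3 is an arbitrary
# example value; buildGraph uses Dwin//2 when it calls set_dsf.
def _gauss_weight_example(sigma=3):
    a = 1.0 / (sigma * math.sqrt(2 * math.pi))
    b = 2.0 * (sigma ** 2)
    # weight assigned to a pair of n-grams separated by distance d in the window
    return [a * math.exp(-(d * 1.0) / b) for d in (1, 2, 3)]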
| apache-2.0 |
xzh86/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
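# Illustrative sketch, not part of the original module: minimal usage of the
# Isomap estimator above. The random data and parameter values are arbitrary.
def _isomap_usage_example():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 10)
    # fit the neighbour graph, compute geodesic distances and embed in 2-D
    embedding = Isomap(n_neighbors=5, n_components=2).fit_transform(X)
    return embedding.shape  # (50, 2)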
| bsd-3-clause |
tomlof/scikit-learn | examples/ensemble/plot_isolation_forest.py | 39 | 2361 | """
==========================================
IsolationForest example
==========================================
An example using IsolationForest for anomaly detection.
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a measure
of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path lengths
for particular samples, they are highly likely to be anomalies.
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
# Generate train data
X = 0.3 * rng.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rng.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# plot the line, the samples, and the nearest vectors to the plane
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("IsolationForest")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([b1, b2, c],
["training observations",
"new regular observations", "new abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
cdfassnacht/CodeCDF | python/wcs_1click.py | 1 | 6664 | """
Code to adjust the WCS of one or more fits files by assigning the
requested RA and Dec (given on the command line) to a selected pixel in
each fits file. The pixel is selected interactively.
A typical usage would be in fixing a set of files where the WCS does
not quite agree. An object that is in all of the images with know (RA,Dec)
coordinates is selected and the user interactively marks the position
of the star in each file.
Usage: python wcs_1click [ra] [dec] (-p [pixscale]) [fitsfile(s)]
-- or --
python wcs_1click.py [ra] [dec] (-p [pixscale]) -f [flatfile] [fitsfile(s)]
Input parameters:
ra - RA in decimal degrees
dec - Dec in decimal degrees
pixscale - [OPTIONAL] pixel scale in arcsec/pix
fmax - [OPTIONAL] maximum display value, in sigma above mean
Default value if this is not set is fmax=10
flatfile - [OPTIONAL] flat-field file to apply to the file(s) before
displaying it
fitsfile(s) - Either a single filename or a wildcard expression
e.g., m13*fits
"""
import sys
import numpy as np
from astropy import wcs
from astropy.io import fits as pf
from matplotlib import pyplot as plt
from specim import imfuncs as imf
""" Check command line syntax """
if len(sys.argv)<4:
print('')
print('Usage:')
print(' python wcs_1click.py [ra] [dec] (flag1 flag1val'
' flag2 flag2val...) [fitsfile(s)]')
print('')
print('Required inputs:')
print(' ra - RA in decimal degrees')
print(' dec - Dec in decimal degrees')
print(' fitsfile(s) - Either a single filename or a wildcard expression')
print(' e.g., m13*fits')
print('')
print('OPTIONAL FLAGS and associated parameters')
print(' -p [pixscale] - pixel scale in arcsec/pix')
print(' -flat [flatfile] - flat-field file to be applied to the input'
' fits files')
print(' -fmax [fmax] - maximum flux value, in sigma above mean,'
' for display')
print(' Default value: 10')
print('')
exit()
""" Set up variables for later use """
filestart = 3
pixscale = None
fmax = 10.
flat = None
flatfile = None
start_files = False
subimsize = 21
no_error = True
""" Parse the command line """
ra = float(sys.argv[1])
dec = float(sys.argv[2])
while start_files is False and no_error:
if sys.argv[filestart] == '-p':
try:
pixscale = float(sys.argv[filestart+1])
except ValueError:
msg = 'ERROR: pixel scale is not a floating point number'
no_error = False
except IndexError:
msg = 'ERROR: -p used but no pixel scale given'
no_error = False
filestart += 2
elif sys.argv[filestart] == '-flat':
try:
flatfile = sys.argv[filestart+1]
except IndexError:
msg = 'ERROR: -flat used but no flat-field file is given'
no_error = False
filestart += 2
elif sys.argv[filestart] == '-fmax':
try:
fmax = float(sys.argv[filestart+1])
except ValueError:
msg = 'ERROR: fmax is not a floating point number'
no_error = False
except IndexError:
msg = 'ERROR: -fmax used but no fmax value is given'
no_error = False
filestart += 2
else:
start_files = True
if no_error is not True:
print('')
print('%s' % msg)
print('')
exit()
""" Create the input file list """
if len(sys.argv) > filestart + 1:
files = sys.argv[filestart:]
else:
files = [sys.argv[filestart],]
crpix1 = np.zeros(len(files))
crpix2 = np.zeros(len(files))
""" Read in the flat-field data """
if flatfile is not None:
flat = pf.getdata(flatfile)
print('')
print('Using flat-field file: %s' % flatfile)
"""
Set up the pixel scale to use
The default is to use the WCS information in the file header, but if
the pixscale parameter has been set then its value overrides any
pixel scale information in the header
"""
if pixscale is not None:
pixscale /= 3600.
""" Loop through the input files, marking the object in each one """
for infile, crp1, crp2 in zip(files, crpix1, crpix2):
""" Open and display the image """
im1 = imf.Image(infile)
if flat is not None:
im1.data = im1.data.astype(float)
im1.data /= flat
im1.zoomsize = subimsize
im1.display(fmax=fmax, mode='xy', title=im1.infile)
""" Run the interactive zooming and marking """
im1.start_interactive()
plt.show()
""" Set the crpix values to the marked location """
if im1.dispim.xmark is not None:
crp1 = im1.dispim.xmark + 1
if im1.dispim.ymark is not None:
crp2 = im1.dispim.ymark + 1
"""
If there is no WCS information in the input file, create a base version
to be filled in later
"""
if im1['input'].wcsinfo is None:
im1['input'].wcsinfo = wcs.WCS(naxis=2)
im1['input'].wcsinfo.wcs.ctype = ['RA---TAN', 'DEC--TAN']
im1['input'].update_crpix((crp1, crp2), verbose=False)
im1['input'].update_crval((ra, dec), verbose=False)
im1.wcsinfo = im1['input'].wcsinfo
if flat is not None:
im1.data *= flat
im1.save(verbose=False)
del(im1)
"""
Report on the updated values
"""
print('')
print('File CRVAL1 CRVAL2 CRPIX1 CRPIX2 ')
print('------------------------ ----------- ----------- -------- --------')
for infile in files:
hdr = pf.getheader(infile)
f = infile[:-5]
print('%-24s %11.7f %+11.7f %8.2f %8.2f' % (f, ra, dec, hdr['crpix1'],
hdr['crpix2']))
del hdr
# """
# Second pass through the fits files, assigning the RA and Dec to the
# appropriate CRPIX, and setting the pixel scale if requested (pix scale
# not yet implemented)
# """
# if pixscale is not None:
# pixscale /= 3600.
# print ''
# print 'File CRVAL1 CRVAL2 CRPIX1 CRPIX2 '
# print '------------------------ ----------- ----------- -------- --------'
# for i in range(len(files)):
# hdu = pf.open(files[i], mode='update')
# hdr = hdu[0].header
# hdr['crval1'] = ra
# hdr['crval2'] = dec
# hdr['crpix1'] = crpix1[i]
# hdr['crpix2'] = crpix2[i]
# try:
# foo = hdr['ctype1']
# except KeyError:
# hdr['ctype1'] = 'RA---TAN'
# try:
# foo = hdr['ctype2']
# except KeyError:
# hdr['ctype2'] = 'DEC--TAN'
# hdu.flush()
# f = files[i][:-5]
# print '%-24s %11.7f %+11.7f %8.2f %8.2f' % (f,ra,dec,crpix1[i],crpix2[i])
| mit |
will-moore/openmicroscopy | components/tools/OmeroPy/test/unit/test_jvmcfg.py | 13 | 7583 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of the automatic JVM setting logic for OMERO startup.
"""
import pytest
from omero.config import ConfigXml, xml
from omero.install.jvmcfg import adjust_settings
from omero.install.jvmcfg import ManualStrategy
from omero.install.jvmcfg import PercentStrategy
from omero.install.jvmcfg import Settings
from omero.install.jvmcfg import Strategy
from omero.install.jvmcfg import strip_dict
from omero.install.jvmcfg import usage_charts
from omero.util.temp_files import create_path
from path import path
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import tostring
from xml.etree.ElementTree import XML
from test.unit.test_config import initial
def write_config(data):
p = create_path()
i = initial()
for k, v in data.items():
for x in i[0:2]: # __ACTIVE__ & default
SubElement(x, "property", name=k, value=v)
string = tostring(i, 'utf-8')
txt = xml.dom.minidom.parseString(string).toprettyxml(" ", "\n", None)
p.write_text(txt)
return p
class TestMemoryStrip(object):
def test_1(self):
rv = strip_dict({"a.b": "c"}, prefix="a")
assert {"b": "c"} == rv
def test_2(self):
rv = strip_dict({"a.b.c": "d"}, prefix="a.b")
assert rv["c"] == "d"
def test_3(self):
rv = strip_dict({
"omero.jvmcfg.foo": "a",
"something.else": "b"})
assert rv["foo"] == "a"
assert "something.else" not in rv
@pytest.mark.parametrize("input,output", (
({"omero.jvmcfg.heap_size.blitz": "1g"}, {"heap_size": "1g"}),
))
def test_4(self, input, output):
p = write_config(input)
config = ConfigXml(filename=str(p), env_config="default")
try:
m = config.as_map()
s = strip_dict(m, suffix="blitz")
assert s == output
finally:
config.close()
def test_5(self):
rv = strip_dict({
"omero.jvmcfg.a.blitz": "b",
}, suffix="blitz")
assert rv["a"] == "b"
class TestSettings(object):
def test_initial(self):
s = Settings()
assert s.perm_gen == "128m"
assert s.heap_dump == "off"
assert s.heap_size == "512m"
def test_explicit(self):
s = Settings({
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_defaults(self):
s = Settings({}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "xxx"
assert s.heap_dump == "yyy"
assert s.heap_size == "zzz"
def test_both(self):
s = Settings({
"perm_gen": "aaa",
"heap_dump": "bbb",
"heap_size": "ccc",
}, {
"perm_gen": "xxx",
"heap_dump": "yyy",
"heap_size": "zzz",
})
assert s.perm_gen == "aaa"
assert s.heap_dump == "bbb"
assert s.heap_size == "ccc"
class TestStrategy(object):
def test_no_instantiate(self):
with pytest.raises(Exception):
Strategy("blitz")
def test_hard_coded(self):
strategy = ManualStrategy("blitz")
settings = strategy.get_memory_settings()
assert settings == [
"-Xmx512m",
"-XX:MaxPermSize=128m",
"-XX:+IgnoreUnrecognizedVMOptions",
]
def test_percent_usage(self):
strategy = PercentStrategy("blitz")
table = list(strategy.usage_table(15, 16))[0]
assert table[0] == 2**15
assert table[1] == 2**15*15/100
def test_heap_dump_on(self):
settings = Settings({"heap_dump": "on"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert not append
def test_heap_dump_tmp(self):
settings = Settings({"heap_dump": "tmp"})
strategy = PercentStrategy("blitz", settings)
hd = strategy.get_heap_dump()
append = strategy.get_append()
assert " " not in hd
assert "HeapDumpPath" not in hd
assert "HeapDumpPath" in "".join(append)
class AdjustFixture(object):
def __init__(self, input, output, name, **kwargs):
self.input = input
self.output = output
self.name = name
self.kwargs = kwargs
def validate(self, rv):
for k, v in self.output.items():
assert k in rv
found = rv[k]
found.pop(0) # settings
assert v == found, "%s.%s: %s <> %s" % (self.name, k,
v, found)
import json
f = open(__file__[:-3] + ".json", "r")
data = json.load(f)
AFS = []
for x in data:
AFS.append(AdjustFixture(x["input"], x["output"], x["name"]))
def template_xml():
templates = path(__file__) / ".." / ".." / ".."
templates = templates / ".." / ".." / ".."
templates = templates / "etc" / "templates" / "grid" / "templates.xml"
templates = templates.abspath()
return XML(templates.text())
class TestAdjustStrategy(object):
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_adjust(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
xml = template_xml()
config = ConfigXml(filename=str(p), env_config="default")
try:
rv = adjust_settings(config, xml, **fixture.kwargs)
fixture.validate(rv)
finally:
config.close()
@pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
def test_12527(self, fixture, monkeypatch):
monkeypatch.setattr(Strategy, '_system_memory_mb_java',
lambda x: (2000, 4000))
p = write_config(fixture.input)
old_templates = path(__file__).dirname() / "old_templates.xml"
xml = XML(old_templates.abspath().text())
config = ConfigXml(filename=str(p), env_config="default")
with pytest.raises(Exception):
adjust_settings(config, xml, **fixture.kwargs)
class TestChart(object):
def test_percent_chart(self):
try:
usage_charts("target/charts.png")
except ImportError:
# Requires matplotlib, etc
pass
| gpl-2.0 |
ccauet/scikit-optimize | skopt/tests/test_utils.py | 1 | 2315 | import pytest
import tempfile
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from skopt import gp_minimize
from skopt import load
from skopt import dump
from skopt.benchmarks import bench1
from skopt.benchmarks import bench3
from skopt.learning import ExtraTreesRegressor
from skopt.optimizer import Optimizer
def check_optimization_results_equality(res_1, res_2):
# Check if the results objects have the same keys
assert_equal(sorted(res_1.keys()), sorted(res_2.keys()))
# Shallow check of the main optimization results
assert_array_equal(res_1.x, res_2.x)
assert_array_equal(res_1.x_iters, res_2.x_iters)
assert_array_equal(res_1.fun, res_2.fun)
assert_array_equal(res_1.func_vals, res_2.func_vals)
@pytest.mark.fast_test
def test_dump_and_load():
res = gp_minimize(bench3,
[(-2.0, 2.0)],
x0=[0.],
acq_func="LCB",
n_calls=2,
n_random_starts=0,
random_state=1)
# Test normal dumping and loading
with tempfile.TemporaryFile() as f:
dump(res, f)
res_loaded = load(f)
check_optimization_results_equality(res, res_loaded)
assert_true("func" in res_loaded.specs["args"])
# Test dumping without objective function
with tempfile.TemporaryFile() as f:
dump(res, f, store_objective=False)
res_loaded = load(f)
check_optimization_results_equality(res, res_loaded)
assert_true(not ("func" in res_loaded.specs["args"]))
# Delete the objective function and dump the modified object
del res.specs["args"]["func"]
with tempfile.TemporaryFile() as f:
dump(res, f, store_objective=False)
res_loaded = load(f)
check_optimization_results_equality(res, res_loaded)
assert_true(not ("func" in res_loaded.specs["args"]))
@pytest.mark.fast_test
def test_dump_and_load_optimizer():
base_estimator = ExtraTreesRegressor(random_state=2)
opt = Optimizer([(-2.0, 2.0)], base_estimator, n_random_starts=1,
acq_optimizer="sampling")
opt.run(bench1, n_iter=3)
with tempfile.TemporaryFile() as f:
dump(opt, f)
load(f)
| bsd-3-clause |
LodewijkSikkel/paparazzi | sw/airborne/test/ahrs/ahrs_utils.py | 86 | 4923 | #! /usr/bin/env python
# Copyright (C) 2011 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def run_simulation(ahrs_type, build_opt, traj_nb):
print("\nBuilding ahrs")
args = ["make", "clean", "run_ahrs_on_synth", "AHRS_TYPE=AHRS_TYPE_" + ahrs_type] + build_opt
#print(args)
p = subprocess.Popen(args=args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
outputlines = p.stdout.readlines()
p.wait()
for i in outputlines:
print(" # " + i, end=' ')
print()
print("Running simulation")
print(" using traj " + str(traj_nb))
p = subprocess.Popen(args=["./run_ahrs_on_synth", str(traj_nb)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
shell=False)
outputlines = p.stdout.readlines()
p.wait()
# for i in outputlines:
# print(" "+i, end=' ')
# print("\n")
ahrs_data_type = [('time', 'float32'),
('phi_true', 'float32'), ('theta_true', 'float32'), ('psi_true', 'float32'),
('p_true', 'float32'), ('q_true', 'float32'), ('r_true', 'float32'),
('bp_true', 'float32'), ('bq_true', 'float32'), ('br_true', 'float32'),
('phi_ahrs', 'float32'), ('theta_ahrs', 'float32'), ('psi_ahrs', 'float32'),
('p_ahrs', 'float32'), ('q_ahrs', 'float32'), ('r_ahrs', 'float32'),
('bp_ahrs', 'float32'), ('bq_ahrs', 'float32'), ('br_ahrs', 'float32')]
mydescr = np.dtype(ahrs_data_type)
data = [[] for dummy in xrange(len(mydescr))]
# import code; code.interact(local=locals())
for line in outputlines:
if line.startswith("#"):
print(" " + line, end=' ')
else:
fields = line.strip().split(' ')
#print(fields)
for i, number in enumerate(fields):
data[i].append(number)
print()
for i in xrange(len(mydescr)):
data[i] = np.cast[mydescr[i]](data[i])
return np.rec.array(data, dtype=mydescr)
def plot_simulation_results(plot_true_state, lsty, label, sim_res):
print("Plotting Results")
# f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_ahrs, lsty, label=label)
    plt.ylabel('degrees')
plt.title('phi')
plt.legend()
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_ahrs, lsty)
plt.title('theta')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_ahrs, lsty)
plt.title('psi')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_ahrs, lsty)
    plt.ylabel('degrees/s')
plt.title('p')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_ahrs, lsty)
plt.title('q')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_ahrs, lsty)
plt.title('r')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_ahrs, lsty)
    plt.ylabel('degrees/s')
plt.xlabel('time in s')
plt.title('bp')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_ahrs, lsty)
plt.xlabel('time in s')
plt.title('bq')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_ahrs, lsty)
plt.xlabel('time in s')
plt.title('br')
if plot_true_state:
plt.subplot(3, 3, 1)
plt.plot(sim_res.time, sim_res.phi_true, 'r--')
plt.subplot(3, 3, 2)
plt.plot(sim_res.time, sim_res.theta_true, 'r--')
plt.subplot(3, 3, 3)
plt.plot(sim_res.time, sim_res.psi_true, 'r--')
plt.subplot(3, 3, 4)
plt.plot(sim_res.time, sim_res.p_true, 'r--')
plt.subplot(3, 3, 5)
plt.plot(sim_res.time, sim_res.q_true, 'r--')
plt.subplot(3, 3, 6)
plt.plot(sim_res.time, sim_res.r_true, 'r--')
plt.subplot(3, 3, 7)
plt.plot(sim_res.time, sim_res.bp_true, 'r--')
plt.subplot(3, 3, 8)
plt.plot(sim_res.time, sim_res.bq_true, 'r--')
plt.subplot(3, 3, 9)
plt.plot(sim_res.time, sim_res.br_true, 'r--')
def show_plot():
plt.show()
| gpl-2.0 |
amolkahat/pandas | pandas/tests/indexes/period/test_period_range.py | 11 | 3646 | import pytest
import pandas.util.testing as tm
from pandas import date_range, NaT, period_range, Period, PeriodIndex
class TestPeriodRange(object):
@pytest.mark.parametrize('freq', ['D', 'W', 'M', 'Q', 'A'])
def test_construction_from_string(self, freq):
# non-empty
expected = date_range(start='2017-01-01', periods=5,
freq=freq, name='foo').to_period()
start, end = str(expected[0]), str(expected[-1])
result = period_range(start=start, end=end, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(start=start, periods=5, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=5, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq=freq, name='foo')
result = period_range(start=start, periods=0, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq=freq, name='foo')
tm.assert_index_equal(result, expected)
def test_construction_from_period(self):
# upsampling
start, end = Period('2017Q1', freq='Q'), Period('2018Q1', freq='Q')
expected = date_range(start='2017-03-31', end='2018-03-31', freq='M',
name='foo').to_period()
result = period_range(start=start, end=end, freq='M', name='foo')
tm.assert_index_equal(result, expected)
# downsampling
start, end = Period('2017-1', freq='M'), Period('2019-12', freq='M')
expected = date_range(start='2017-01-31', end='2019-12-31', freq='Q',
name='foo').to_period()
result = period_range(start=start, end=end, freq='Q', name='foo')
tm.assert_index_equal(result, expected)
# empty
expected = PeriodIndex([], freq='W', name='foo')
result = period_range(start=start, periods=0, freq='W', name='foo')
tm.assert_index_equal(result, expected)
result = period_range(end=end, periods=0, freq='W', name='foo')
tm.assert_index_equal(result, expected)
result = period_range(start=end, end=start, freq='W', name='foo')
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
period_range(start='2017Q1')
with tm.assert_raises_regex(ValueError, msg):
period_range(end='2017Q1')
with tm.assert_raises_regex(ValueError, msg):
period_range(periods=5)
with tm.assert_raises_regex(ValueError, msg):
period_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
period_range(start='2017Q1', end='2018Q1', periods=8, freq='Q')
# start/end NaT
msg = 'start and end must not be NaT'
with tm.assert_raises_regex(ValueError, msg):
period_range(start=NaT, end='2018Q1')
with tm.assert_raises_regex(ValueError, msg):
period_range(start='2017Q1', end=NaT)
# invalid periods param
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
period_range(start='2017Q1', periods='foo')
| bsd-3-clause |
skulumani/asteroid_dumbbell | visualization/hdf_plotter.py | 1 | 26528 | """Generate plots from a h5py simulation output
"""
import cv2
from visualization import opencv, plotting
from dynamics import asteroid, dumbbell, controller
from kinematics import attitude
import argparse
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pdb
import h5py
def printname(name):
print(name)
def sift_flann_matching_image(img1, img2, ratio, plot=False,
filename='/tmp/test.png', save_fig=False):
"""Need full color images
"""
kp1, des1, _ = opencv.sift_image(img1)
kp2, des2, _ = opencv.sift_image(img2)
# FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50) # or empty dictionary
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# draw only good matches by creating a mask
matchesMask = [[0, 0] for i in range(len(matches))]
# ratio test
for i, (m, n) in enumerate(matches):
if m.distance < ratio * n.distance:
matchesMask[i] = [1, 0]
if plot:
draw_params = dict(matchColor = (0, 255, 0),
singlePointColor = (255, 0, 0),
matchesMask = matchesMask,
flags = 0)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
fig, ax = plt.subplots(1)
ax.imshow(img3)
ax.axis('off')
if save_fig:
plt.imsave(filename, img3, format='png')
plt.show()
return matches
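# Illustrative sketch, not part of the original module: matching two frames
# pulled from a simulation output file. The file name and frame indices are
# hypothetical; any pair of full-colour images of the same scene can be used.
def _matching_example(filename='/tmp/sim_output.hdf5'):
    with h5py.File(filename, 'r') as sim_data:
        images = sim_data['landing']
        img1 = images[:, :, :, 3000]
        img2 = images[:, :, :, 3200]
    # a ratio around 0.3-0.7 trades the number of matches against false positives
    return sift_flann_matching_image(img1, img2, ratio=0.3, plot=False)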
def plot_keyframe_original(time, i_state, R_ast2int, R_bcam2i, save_fig=False,
fwidth=1, filename='/tmp/estimate.eps',
kf_path='./data/itokawa_landing/cycles_high_7200_keyframe_poses_remove_first_kf.txt'):
"""Plot keyframe trajectory without any transformation
"""
    # convert inertial position into asteroid fixed frame
inertial_pos = i_state[:, 0:3]
asteroid_pos = np.zeros_like(inertial_pos)
for ii, (ip, Ra2i) in enumerate(zip(inertial_pos, R_ast2int)):
asteroid_pos[ii, :] = Ra2i.reshape((3,3)).T.dot(ip)
# first determine the scale of the keyframe translations
kf_data = np.loadtxt(kf_path)
kf_time = kf_data[:, 0].astype(dtype='int') # time of keyframe, matches image/time vector
    kf_traj = kf_data[:, 1:4] # position of each frame relative to the first
kf_quat = kf_data[:, 4:8] # rotation from first keyframe to current
Rcam2ast = attitude.rot3(np.deg2rad(-90)).dot(np.array([[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]))
kf_traj = Rcam2ast.dot(kf_traj.T).T
# need R at time of first frame and then the asteroid to inertial - dumbbell to ast needed
R0_s2i = i_state[653, 6:15].reshape((3, 3))
R0_a2i = R_ast2int[653, :].reshape((3, 3))
R0_s2a = (R0_a2i.T.dot(R0_s2i))
kf_traj = R0_s2a.dot(kf_traj.T).T
kf_R_first2cur = np.zeros((len(kf_time), 9))
# transform each quaternion to a rotation matrix
for ii,q in enumerate(kf_quat):
kf_R_first2cur[ii, :] = R0_s2a.dot(Rcam2ast.dot(attitude.quattodcm(q))).reshape(-1)
# rotate each keyframe point by the corresponding angle of asteroid
# for ii,index in enumerate(kf_time):
# kf_traj[ii,:] = R_ast2int[ii,:].reshape((3,3)).T.dot(kf_traj[ii,:])
# kf_R_first2cur[ii, :] = R_ast2int[ii, :].reshape((3, 3)).T.dot(kf_R_first2cur[ii, :].reshape((3,3))).reshape(-1)
# determine scale of translation between keyframe points
kf_diff = np.diff(kf_traj, axis=0)
kf_scale = np.sqrt(np.sum(kf_diff ** 2, axis=1))
# find true positions at the same time as keyframes
kf_traj_true = asteroid_pos[kf_time[0]:kf_time[-1], :]
kf_scale_true = np.sqrt(np.sum(kf_traj_true ** 2, axis=1))
scale = kf_scale_true[0]
Rb2i = R_bcam2i[kf_time[0], :].reshape((3,3))
Rb2a = R_ast2int[kf_time[0], :].reshape((3, 3)).T.dot(Rb2i)
# scale and translate
kf_traj = scale * kf_traj + asteroid_pos[kf_time[0], :]
# translate kf_traj
difference = kf_traj[0, :] - kf_traj_true[0, :]
kf_traj = kf_traj - difference
# plot keyframe motion without any modifications
kf_orig_fig = plt.figure()
kf_orig_ax = axes3d.Axes3D(kf_orig_fig)
kf_orig_ax.set_zlim3d(-1, 1)
kf_orig_ax.set_xlim3d(-0, 2)
kf_orig_ax.set_ylim3d(-1, 1)
kf_orig_ax.plot(kf_traj[:,0], kf_traj[:, 1], kf_traj[:, 2], 'b-*')
# plot the viewing direction
length = 0.3
for ii, R in enumerate(kf_R_first2cur):
view_axis = R.reshape((3,3))[:, 2]
kf_orig_ax.plot([kf_traj[ii, 0], kf_traj[ii, 0] + length * view_axis[0]],
[kf_traj[ii, 1], kf_traj[ii, 1] + length * view_axis[1]],
[kf_traj[ii, 2], kf_traj[ii, 2] + length * view_axis[2]],
'r')
kf_orig_ax.plot(kf_traj_true[:, 0], kf_traj_true[:, 1], kf_traj_true[:, 2], 'r')
kf_orig_ax.set_title('Keyframe Original')
kf_orig_ax.set_xlabel('X')
kf_orig_ax.set_ylabel('Y')
kf_orig_ax.set_zlabel('Z')
# plot the components
kf_comp_fig, kf_comp_ax = plt.subplots(3, 1, figsize=plotting.figsize(1), sharex=True)
kf_comp_ax[0].plot(kf_time, kf_traj[:, 0], 'b-*', label='Estimate')
kf_comp_ax[0].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 0], 'r-', label='True')
kf_comp_ax[0].set_ylim(0, 3)
kf_comp_ax[0].set_ylabel(r'$X$ (km)')
kf_comp_ax[0].grid()
kf_comp_ax[1].plot(kf_time, kf_traj[:, 1], 'b-*', label='Estimate')
kf_comp_ax[1].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 1], 'r-', label='True')
kf_comp_ax[1].set_ylim(-2, 1)
kf_comp_ax[1].set_ylabel(r'$Y$ (km)')
kf_comp_ax[1].grid()
kf_comp_ax[2].plot(kf_time, kf_traj[:, 2], 'b-*', label='Estimate')
kf_comp_ax[2].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 2], 'r-', label='True')
kf_comp_ax[2].set_ylim(-0.5, 2.5)
kf_comp_ax[2].set_ylabel(r'$Z$ (km)')
kf_comp_ax[2].grid()
kf_comp_ax[2].set_xlabel('Time (sec)')
plt.legend(loc='best')
if save_fig:
plt.figure(kf_comp_fig.number)
plt.savefig(filename)
plt.show()
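# Illustrative sketch, not part of the original module: the scale-and-translate
# step used above to compare the scale-free ORB-SLAM keyframe trajectory with
# the true trajectory. Both inputs are assumed to be (n, 3) arrays already
# expressed in the same (asteroid-fixed) frame.
def _align_keyframes_example(kf_traj, true_traj):
    # monocular SLAM recovers translation only up to scale, so fix the scale
    # using the magnitude of the first true position
    scale = np.sqrt(np.sum(true_traj[0, :] ** 2))
    aligned = scale * kf_traj + true_traj[0, :]
    # translate so the first keyframe coincides with the first true point
    aligned = aligned - (aligned[0, :] - true_traj[0, :])
    return aligned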
def plot_keyframe_original_asteroid(time, state, R_bcam2a, save_fig=False,
fwidth=1, filename='/tmp/estimate.eps',
kf_path='./data/asteroid_circumnavigate/asteroid_fixed_circumnavigate.txt'):
"""Plot keyframe trajectory and scale to match truth
The asteroid is assumed to be not rotating. Therefore the input state is
actually already in the asteroid frame
"""
    # convert inertial position into asteroid fixed frame
asteroid_pos = state[:, 0:3]
# first determine the scale of the keyframe translations
kf_data = np.loadtxt(kf_path)
kf_time = kf_data[:, 0].astype(dtype='int') # time of keyframe, matches image/time vector
    kf_traj = kf_data[:, 1:4] # position of each frame relative to the first
kf_quat = kf_data[:, 4:8] # rotation from first keyframe to current
Rcam2ast = attitude.rot3(np.deg2rad(-90)).dot(np.array([[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]))
kf_traj = Rcam2ast.dot(kf_traj.T).T
# need R at time of first frame and then the asteroid to inertial - dumbbell to ast needed
R0_s2i = state[kf_time[0], 6:15].reshape((3, 3))
R0_s2a = (R0_s2i)
kf_traj = R0_s2a.dot(kf_traj.T).T
kf_R_first2cur = np.zeros((len(kf_time), 9))
# transform each quaternion to a rotation matrix
for ii,q in enumerate(kf_quat):
kf_R_first2cur[ii, :] = R0_s2a.dot(Rcam2ast.dot(attitude.quattodcm(q))).reshape(-1)
# determine scale of translation between keyframe points
kf_diff = np.diff(kf_traj, axis=0)
kf_scale = np.sqrt(np.sum(kf_diff ** 2, axis=1))
# find true positions at the same time as keyframes
kf_traj_true = asteroid_pos[kf_time[0]:kf_time[-1], :]
kf_scale_true = np.sqrt(np.sum(kf_traj_true ** 2, axis=1))
scale = kf_scale_true[0]
Rb2a = R_bcam2a[kf_time[0], :].reshape((3,3))
# scale and translate
kf_traj = scale * kf_traj + asteroid_pos[kf_time[0], :]
# translate kf_traj
difference = kf_traj[0, :] - kf_traj_true[0, :]
kf_traj = kf_traj - difference
# plot keyframe motion without any modifications
kf_orig_fig = plt.figure()
kf_orig_ax = axes3d.Axes3D(kf_orig_fig)
kf_orig_ax.set_zlim3d(-1, 1)
kf_orig_ax.set_xlim3d(-4, 4)
kf_orig_ax.set_ylim3d(-4, 4)
kf_orig_ax.plot(kf_traj[:,0], kf_traj[:, 1], kf_traj[:, 2], 'b-*')
# plot the viewing direction
length = 0.3
for ii, R in enumerate(kf_R_first2cur):
view_axis = R.reshape((3,3))[:, 2]
kf_orig_ax.plot([kf_traj[ii, 0], kf_traj[ii, 0] + length * view_axis[0]],
[kf_traj[ii, 1], kf_traj[ii, 1] + length * view_axis[1]],
[kf_traj[ii, 2], kf_traj[ii, 2] + length * view_axis[2]],
'r')
kf_orig_ax.plot(kf_traj_true[:, 0], kf_traj_true[:, 1], kf_traj_true[:, 2], 'r')
kf_orig_ax.set_title('Keyframe Original')
kf_orig_ax.set_xlabel('X')
kf_orig_ax.set_ylabel('Y')
kf_orig_ax.set_zlabel('Z')
# plot the components
kf_comp_fig, kf_comp_ax = plt.subplots(3, 1, figsize=plotting.figsize(1), sharex=True)
kf_comp_ax[0].plot(kf_time, kf_traj[:, 0], 'b-*', label='Estimate')
kf_comp_ax[0].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 0], 'r-', label='True')
kf_comp_ax[0].set_ylim(-4, 4)
kf_comp_ax[0].set_ylabel(r'$X$ (km)')
kf_comp_ax[0].grid()
kf_comp_ax[1].plot(kf_time, kf_traj[:, 1], 'b-*', label='Estimate')
kf_comp_ax[1].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 1], 'r-', label='True')
kf_comp_ax[1].set_ylim(-4, 4)
kf_comp_ax[1].set_ylabel(r'$Y$ (km)')
kf_comp_ax[1].grid()
kf_comp_ax[2].plot(kf_time, kf_traj[:, 2], 'b-*', label='Estimate')
kf_comp_ax[2].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 2], 'r-', label='True')
kf_comp_ax[2].set_ylim(-1, 1)
kf_comp_ax[2].set_ylabel(r'$Z$ (km)')
kf_comp_ax[2].grid()
kf_comp_ax[2].set_xlabel('Time (sec)')
plt.legend(loc='best')
# compute the difference in the components
traj_diff = np.absolute(asteroid_pos[kf_time, :] - kf_traj)
kf_diff_fig, kf_diff_ax = plt.subplots(3, 1, figsize=plotting.figsize(1), sharex=True)
kf_diff_ax[0].plot(kf_time, traj_diff[:, 0], 'b-*')
kf_diff_ax[0].set_ylabel(r'$X$ (km)')
kf_diff_ax[0].grid()
kf_diff_ax[1].plot(kf_time, traj_diff[:, 1], 'b-*')
kf_diff_ax[1].set_ylabel(r'$Y$ (km)')
kf_diff_ax[1].grid()
kf_diff_ax[2].plot(kf_time, traj_diff[:, 2], 'b-*')
kf_diff_ax[2].set_ylabel(r'$Z$ (km)')
kf_diff_ax[2].grid()
kf_diff_ax[2].set_xlabel(r'Time (sec)')
kf_diff_ax[0].set_title('Difference between ORBSLAM estimate and truth')
if save_fig:
plt.figure(kf_comp_fig.number)
plt.savefig(filename)
plt.show()
def plot_keyframe_original_lissajous(time, state, R_bcam2a, save_fig=False,
fwidth=1, filename='/tmp/estimate.eps',
kf_path='./data/asteroid_circumnavigate/lissajous.txt'):
"""Plot keyframe trajectory and scale to match truth
The asteroid is assumed to be not rotating. Therefore the input state is
actually already in the asteroid frame
"""
    # convert inertial position into asteroid fixed frame
asteroid_pos = state[:, 0:3]
# first determine the scale of the keyframe translations
kf_data = np.loadtxt(kf_path)
kf_time = kf_data[:, 0].astype(dtype='int') # time of keyframe, matches image/time vector
kf_traj = kf_data[:, 1:4] # postiion of each frame relative to the first
kf_quat = kf_data[:, 4:8] # rotation from first keyframe to current
Rcam2ast = attitude.rot2(np.deg2rad(0)).dot(attitude.rot3(np.deg2rad(-90)).dot(np.array([[1, 0, 0],
[0, 0, 1],
[0, 1, 0]])))
kf_traj = Rcam2ast.dot(kf_traj.T).T
# need R at time of first frame and then the asteroid to inertial - dumbbell to ast needed
R0_s2i = state[kf_time[0], 6:15].reshape((3, 3))
R0_s2a = (R0_s2i)
kf_traj = R0_s2a.dot(kf_traj.T).T
kf_R_first2cur = np.zeros((len(kf_time), 9))
# transform each quaternion to a rotation matrix
for ii,q in enumerate(kf_quat):
kf_R_first2cur[ii, :] = R0_s2a.dot(Rcam2ast.dot(attitude.quattodcm(q))).reshape(-1)
# determine scale of translation between keyframe points
kf_diff = np.diff(kf_traj, axis=0)
kf_scale = np.sqrt(np.sum(kf_diff ** 2, axis=1))
# find true positions at the same time as keyframes
kf_traj_true = asteroid_pos[kf_time[0]:kf_time[-1], :]
kf_scale_true = np.sqrt(np.sum(kf_traj_true ** 2, axis=1))
scale = kf_scale_true[0]
Rb2a = R_bcam2a[kf_time[0], :].reshape((3,3))
# scale and translate
kf_traj = scale * kf_traj + asteroid_pos[kf_time[0], :]
# translate kf_traj
difference = kf_traj[0, :] - kf_traj_true[0, :]
kf_traj = kf_traj - difference
# plot keyframe motion without any modifications
kf_orig_fig = plt.figure()
kf_orig_ax = axes3d.Axes3D(kf_orig_fig)
kf_orig_ax.set_zlim3d(-5, 5)
kf_orig_ax.set_xlim3d(-5, 5)
kf_orig_ax.set_ylim3d(-5, 5)
kf_orig_ax.plot(kf_traj[:,0], kf_traj[:, 1], kf_traj[:, 2], 'b-*')
# plot the viewing direction
length = 0.3
for ii, R in enumerate(kf_R_first2cur):
view_axis = R.reshape((3,3))[:, 2]
kf_orig_ax.plot([kf_traj[ii, 0], kf_traj[ii, 0] + length * view_axis[0]],
[kf_traj[ii, 1], kf_traj[ii, 1] + length * view_axis[1]],
[kf_traj[ii, 2], kf_traj[ii, 2] + length * view_axis[2]],
'r')
kf_orig_ax.plot(kf_traj_true[:, 0], kf_traj_true[:, 1], kf_traj_true[:, 2], 'r')
kf_orig_ax.set_title('Keyframe Original')
kf_orig_ax.set_xlabel('X')
kf_orig_ax.set_ylabel('Y')
kf_orig_ax.set_zlabel('Z')
# plot the components
kf_comp_fig, kf_comp_ax = plt.subplots(3, 1, figsize=plotting.figsize(1), sharex=True)
kf_comp_ax[0].plot(kf_time, kf_traj[:, 0], 'b-*', label='Estimate')
kf_comp_ax[0].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 0], 'r-', label='True')
kf_comp_ax[0].set_ylim(-5, 5)
kf_comp_ax[0].set_ylabel(r'$X$ (km)')
kf_comp_ax[0].grid()
kf_comp_ax[1].plot(kf_time, kf_traj[:, 1], 'b-*', label='Estimate')
kf_comp_ax[1].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 1], 'r-', label='True')
kf_comp_ax[1].set_ylim(-5, 5)
kf_comp_ax[1].set_ylabel(r'$Y$ (km)')
kf_comp_ax[1].grid()
kf_comp_ax[2].plot(kf_time, kf_traj[:, 2], 'b-*', label='Estimate')
kf_comp_ax[2].plot(time[kf_time[0]:kf_time[-1]], kf_traj_true[:, 2], 'r-', label='True')
kf_comp_ax[2].set_ylim(-5, 5)
kf_comp_ax[2].set_ylabel(r'$Z$ (km)')
kf_comp_ax[2].grid()
kf_comp_ax[2].set_xlabel('Time (sec)')
plt.legend(loc='best')
# compute the difference in the components
traj_diff = np.absolute(asteroid_pos[kf_time, :] - kf_traj)
kf_diff_fig, kf_diff_ax = plt.subplots(3, 1, figsize=plotting.figsize(1), sharex=True)
kf_diff_ax[0].plot(kf_time, traj_diff[:, 0], 'b-*')
kf_diff_ax[0].set_ylabel(r'$X$ (km)')
kf_diff_ax[0].grid()
kf_diff_ax[1].plot(kf_time, traj_diff[:, 1], 'b-*')
kf_diff_ax[1].set_ylabel(r'$Y$ (km)')
kf_diff_ax[1].grid()
kf_diff_ax[2].plot(kf_time, traj_diff[:, 2], 'b-*')
kf_diff_ax[2].set_ylabel(r'$Z$ (km)')
kf_diff_ax[2].grid()
kf_diff_ax[2].set_xlabel(r'Time (sec)')
kf_diff_ax[0].set_title('Difference between ORBSLAM estimate and truth')
if save_fig:
plt.figure(kf_comp_fig.number)
plt.savefig(filename)
plt.show()
def plot_keyframe_trajectory(time, i_state, R_ast2int, R_bcam2i, save_fig=False,
fwidth=1, filename='/tmp/estimate.eps',
kf_path='./data/itokawa_landing/cycles_high_7200_keyframe_poses.txt'):
"""Read the keyframe data and transform it to match my stuff
"""
    # convert inertial position into asteroid fixed frame
inertial_pos = i_state[:, 0:3]
asteroid_pos = np.zeros_like(inertial_pos)
for ii, (ip, Ra2i) in enumerate(zip(inertial_pos, R_ast2int)):
asteroid_pos[ii, :] = Ra2i.reshape((3,3)).T.dot(ip)
# first determine the scale of the keyframe translations
kf_data = np.loadtxt(kf_path)
kf_time = kf_data[:, 0].astype(dtype='int') # time of keyframe, matches image/time vector
    kf_traj = kf_data[:, 1:4] # position of each frame relative to the first
kf_quat = kf_data[:, 4:8] # rotation from first keyframe to current
pdb.set_trace()
Rcam2ast = np.array([[1, 0, 0],
[0, 0, 1],
[0, 1, 0]])
kf_traj = Rcam2ast.dot(kf_traj.T).T
# need R at time of first frame and then the asteroid to inertial - dumbbell to ast needed
R0_s2i = i_state[kf_time[0], 6:15].reshape((3, 3))
R0_a2i = R_ast2int[kf_time[0], :].reshape((3, 3))
R0_s2a = (R0_a2i.T.dot(R0_s2i))
kf_traj = R0_s2a.dot(kf_traj.T).T
kf_R_first2cur = np.zeros((len(kf_time), 9))
# transform each quaternion to a rotation matrix
# for ii,q in enumerate(kf_quat):
# kf_R_first2cur[ii, :] = R0_s2a.dot(Rcam2ast.dot(attitude.quattodcm(q))).reshape(-1)
# rotate each keyframe point by the corresponding angle of asteroid
for ii,index in enumerate(kf_time):
# kf_traj[ii,:] = R_ast2int[ii,:].reshape((3,3)).T.dot(kf_traj[ii,:])
kf_R_first2cur[ii, :] = R_ast2int[ii, :].reshape((3, 3)).T.dot(kf_R_first2cur[ii, :].reshape((3,3))).reshape(-1)
# determine scale of translation between keyframe points
kf_diff = np.diff(kf_traj, axis=0)
kf_scale = np.sqrt(np.sum(kf_diff ** 2, axis=1))
# find true positions at the same time as keyframes
kf_traj_true = asteroid_pos[kf_time, :]
kf_scale_true = np.sqrt(np.sum(kf_traj_true ** 2, axis=1))
scale = kf_scale_true[0]
Rb2i = R_bcam2i[kf_time[0], :].reshape((3,3))
Rb2a = R_ast2int[kf_time[0], :].reshape((3, 3)).T.dot(Rb2i)
# scale and translate
kf_traj = scale * kf_traj + asteroid_pos[kf_time[0], :]
# plot keyframe motion without any modifications
kf_orig_fig = plt.figure()
kf_orig_ax = axes3d.Axes3D(kf_orig_fig)
kf_orig_ax.set_zlim3d(-1, 1)
kf_orig_ax.set_xlim3d(-3, 3)
kf_orig_ax.set_ylim3d(-3, 3)
kf_orig_ax.plot(kf_traj[:,0], kf_traj[:, 1], kf_traj[:, 2], 'b-*')
# plot the viewing direction
length = 0.3
for ii, R in enumerate(kf_R_first2cur):
view_axis = R.reshape((3,3))[:, 2]
kf_orig_ax.plot([kf_traj[ii, 0], kf_traj[ii, 0] + length * view_axis[0]],
[kf_traj[ii, 1], kf_traj[ii, 1] + length * view_axis[1]],
[kf_traj[ii, 2], kf_traj[ii, 2] + length * view_axis[2]],
'r')
kf_orig_ax.plot(kf_traj_true[:, 0], kf_traj_true[:, 1], kf_traj_true[:, 2], 'r')
kf_orig_ax.set_title('Keyframe Original')
kf_orig_ax.set_xlabel('X')
kf_orig_ax.set_ylabel('Y')
kf_orig_ax.set_zlabel('Z')
# plot the components
kf_comp_fig, kf_comp_ax = plt.subplots(3, 1, figsize=plotting.figsize(1), sharex=True)
kf_comp_ax[0].plot(kf_time, kf_traj[:, 0], 'b-*', label='Estimate')
kf_comp_ax[0].plot(kf_time, kf_traj_true[:, 0], 'r-*', label='True')
kf_comp_ax[0].set_ylim(0, 3)
kf_comp_ax[0].set_ylabel(r'$X$ (km)')
kf_comp_ax[1].plot(kf_time, kf_traj[:, 1], 'b-*', label='Estimate')
kf_comp_ax[1].plot(kf_time, kf_traj_true[:, 1], 'r-*', label='True')
kf_comp_ax[1].set_ylim(-2, 1)
kf_comp_ax[1].set_ylabel(r'$Y$ (km)')
kf_comp_ax[2].plot(kf_time, kf_traj[:, 2], 'b-*', label='Estimate')
kf_comp_ax[2].plot(kf_time, kf_traj_true[:, 2], 'r-*', label='True')
kf_comp_ax[2].set_ylim(-0.5, 2.5)
kf_comp_ax[2].set_ylabel(r'$Z$ (km)')
kf_comp_ax[2].set_xlabel('Time (sec)')
plt.legend(loc='best')
if save_fig:
plt.figure(kf_comp_fig.number)
plt.savefig(filename)
plt.show()
def create_plots(filename, plot_flags):
# logic to change the desired controller function
if plot_flags.mission == 'lissajous':
desired_attitude_func = controller.body_fixed_pointing_attitude
desired_translation_func = controller.inertial_lissajous_yz_plane
elif plot_flags.mission == 'circumnavigate':
desired_attitude_func = controller.body_fixed_pointing_attitude
desired_translation_func = controller.inertial_circumnavigate
# load the h5py file with all the imagery and simulation data
with h5py.File(filename, 'r') as sim_data:
sim_data.visit(printname)
K = sim_data['K']
i_state = sim_data['i_state']
time = sim_data['time']
images = sim_data['landing']
RT_vector = sim_data['RT']
R_bcam2i_vector = sim_data['R_i2bcam'] # the name is incorrect - actually it's bcamera to inertial frame
# R_ast2int = sim_data['Rast2inertial']
# define the asteroid and dumbbell objects like the simulation driver
ast_name = 'itokawa'
num_faces = 64
ast = asteroid.Asteroid(ast_name,num_faces)
dum = dumbbell.Dumbbell(m1=500, m2=500, l=0.003)
# draw some of the features from an example image
if plot_flags.feature_matching:
sift_flann_matching_image(images[:, :, :, 3000],
images[:, :, :, 3200], ratio=0.3,
plot=True,
filename='/tmp/itokawa_feature_matching.png',
save_fig=plot_flags.save_plots)
# draw the true and estimated trajectory
if plot_flags.simulation_plots:
if plot_flags.frame == 'inertial':
plotting.plot_controlled_blender_inertial(time,
i_state,
ast,
dum,
plot_flags.save_plots,
1,
controller.traverse_then_land_vertically,
controller.body_fixed_pointing_attitude)
elif plot_flags.frame == 'asteroid':
plotting.plot_controlled_blender_inertial_fixed_asteroid(time,
i_state,
ast,
dum,
plot_flags.save_plots,
1,
desired_translation_func,
desired_attitude_func)
# create animation
if plot_flags.animation:
if plot_flags.frame == 'inertial':
plotting.animate_inertial_trajectory(time, i_state, ast, dum, 3600, plot_flags.save_plots)
elif plot_flags.frame == 'asteroid':
plotting.animate_relative_trajectory(time, i_state, ast, dum, plot_flags.save_plots)
if plot_flags.keyframe:
# plot_keyframe_trajectory(time, i_state, R_ast2int, R_bcam2i_vector,
# plot_flags.save_plots, fwidth=1,
# filename='/tmp/keyframe_estimate.eps')
if plot_flags.mission == 'asteroid':
plot_keyframe_original_asteroid(time, i_state, R_bcam2i_vector,
plot_flags.save_plots, fwidth=1,
filename='/tmp/keyframe_estimate.eps',
kf_path=plot_flags.kf_path)
elif plot_flags.mission == 'lissajous':
plot_keyframe_original_lissajous(time, i_state, R_bcam2i_vector,
plot_flags.save_plots, fwidth=1,
filename='/tmp/keyframe_estimate.eps',
kf_path=plot_flags.kf_path)
if plot_flags.blender_png: # generate blender images
output_path = './visualization/blender'
num_images = images.shape[3]
for ii in range(num_images):
cv2.imwrite(output_path + '/test' + str.zfill(str(ii), 6) + '.png', images[:, :, :, ii])
print("Saving image {0}/{1}".format(ii, num_images))
print("Finished extracting all the images")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate plots from hdf simulation output')
parser.add_argument('filename', type=str, help='Filename of hdf file', action='store')
parser.add_argument('kf_path', type=str, help='Path to keyframe trajectory', action='store')
parser.add_argument("frame", help="Reference frame - asteroid or inertial", action="store")
parser.add_argument("mission", help="Misison to plot - lissajous or circumnavigate", action='store')
parser.add_argument("--feature_matching", help="Generate feature matching example", action="store_true")
parser.add_argument("--simulation_plots", help="Generate plots of the simulation",
action="store_true")
parser.add_argument("--animation", help="Generate an animation",
action="store_true")
parser.add_argument("--save_plots", help="Save plots to /tmp", action="store_true")
parser.add_argument("--keyframe", help="Plot output from ORB-SLAM2",
action="store_true")
parser.add_argument("--blender_png", help="Generate a series of Blender PNG images from the simulation",
action='store_true')
args = parser.parse_args()
create_plots(args.filename, args)
| gpl-3.0 |
jiajunshen/partsNet | pnet/combineQuardPool.py | 1 | 12520 | __author__ = 'jiajunshen'
import numpy as np
import itertools as itr
import amitgroup as ag
from pnet.layer import Layer
from sklearn import linear_model
from multiprocessing import Pool, Value, Array
from sklearn.utils.extmath import (safe_sparse_dot, logsumexp, squared_norm)
shared_data = None
def init(_data):
global shared_data
shared_data = _data
shared_X = None
shared_Y = None
def init_Image(X, Y):
global shared_X
global shared_Y
shared_X = X
shared_Y = Y
@Layer.register('quadrant-partition-svm-layer')
class CombineQuadrantPartitionSVMLayer(Layer):
def __init__(self, num_parts, part_shape, shape=(1, 1), strides=(1, 1), prune = None, settings={}):
# settings: outer_frame, random_seed,samples_per_image,patch_extraction_seed,threshold
#, outer_frame=1, threshold=1):
self._num_parts = num_parts
self._part_shape = part_shape
self._settings = settings
self._train_info = {}
self._partition = []
self._shape = shape
self._strides = strides
self._prune = prune
self._parts = None
self._svms = None
@property
def trained(self):
return self._parts is not None
def train(self, X, Y, OriginalX = None):
return self.split_train_svms(X, Y)
def split_train_svms(self, X, Y):
ag.info("Split training SVMs")
import time,random
if X.shape[-1] > 20:
num_of_processor = 3
else:
num_of_processor = 6
class1, class2 = split_2vs2(10)
round_each_proc = self._num_parts // 4 //num_of_processor
rs = np.random.RandomState(self._settings.get('random_seeds', 0))
#four quadrants
p = Pool(num_of_processor, initializer=init_Image, initargs=(X,Y,))
allObjects = []
for i in range(4):
args = [(i, j, round_each_proc, self._part_shape, (3,3), class1[round_each_proc * j: round_each_proc * (j + 1)],
class2[round_each_proc * j: round_each_proc * (j + 1)], self._settings.get('random_seeds', 0)) for j in range(num_of_processor)]
svm_objects = p.map(task_spread_svms, args)
for objects in svm_objects:
allObjects+=objects
self._svms = allObjects
self._parts = []
self._coef = []
self._intercept = []
if self._prune == None:
for i in range(self._num_parts):
print(i, self._num_parts)
svm_coef = allObjects[i].coef_
svm_intercept = allObjects[i].intercept_
allParam = np.zeros(svm_coef.shape[1] + 1)
allParam[:svm_coef.shape[1]] = svm_coef[0,:]
allParam[-1] = svm_intercept
std = np.std(allParam) * 20
#std = 1
print(np.std(allParam/std),np.max(allParam/std))
self._parts.append((svm_coef/std, svm_intercept/std))
self._coef.append(svm_coef/std)
self._intercept.append(svm_intercept/std)
else: # Here we will prune the parts
partList = []
coefList = []
interceptList = []
coefLengthList = []
for i in range(self._num_parts):
svm_coef = allObjects[i].coef_
svm_intercept = allObjects[i].intercept_
allParam = np.zeros(svm_coef.shape[1] + 1)
allParam[:svm_coef.shape[1]] = svm_coef[0,:]
allParam[-1] = svm_intercept
std = np.std(allParam) * 20
partList.append((svm_coef/std, svm_intercept/std))
coefList.append(svm_coef/std)
interceptList.append(svm_intercept/std)
coefLengthList.append(np.sum(abs(svm_coef/std)))
coefLengthList = np.array(coefLengthList)
coefMaxOrder = np.argsort(coefLengthList)[::-1]
for i in range(self._prune):
self._parts.append(partList[coefMaxOrder[i]])
self._coef.append(coefList[coefMaxOrder[i]])
self._intercept.append(interceptList[coefMaxOrder[i]])
self._coef = np.vstack(self._coef)
self._intercept = np.vstack(self._intercept)
print(np.max(self._coef))
print(np.mean(self._coef))
print(np.max(self._intercept))
print(np.mean(self._intercept))
def extract(self,X_all, Y = None, test_accuracy = False):
ag.info("randomPartition SVM start extracting")
outer_frame = self._settings.get('outer_frame', 0)
dim = (X_all.shape[1],X_all.shape[2])
if outer_frame != 0:
XX = np.zeros((X_all.shape[0], X_all.shape[1] + 2 * outer_frame, X_all.shape[2] + 2 * outer_frame, X_all.shape[3]), dtype=np.float16)
XX[:, outer_frame:X_all.shape[1] + outer_frame, outer_frame:X_all.shape[2] + outer_frame,:] = X_all
X_all = XX
numcl = 2
print("Before blowing up the memory")
roundNumber = 100
numEachRound = X_all.shape[0] // roundNumber
feature_map_list = []
for round in range(roundNumber):
print(round)
X = np.array(X_all[numEachRound * round : numEachRound * (round + 1)], dtype = np.float16)
X_num = X.shape[0]
if(numcl == 2):
feature_map = np.zeros((X.shape[0],) + dim + (self._num_parts,),dtype=np.float16)
else:
feature_map = np.zeros((X.shape[0],) + dim + (numcl, self._num_parts,),dtype=np.float16)
import itertools
argList = list(itertools.product(range(dim[0]),range(dim[1])))
p = Pool(4)
args = ((x, y, self._coef,self._intercept,
X[:,x:x+self._part_shape[0],
y:y+self._part_shape[1],:].reshape(X_num,-1).astype(np.float16)) for(x,y) in argList
)
count = 0
for x, y, score in p.imap(calcScore, args):
feature_map[:,x, y, :] = score
count+=1
p.terminate()
# Start to do pooling
relu = self._settings.get("relu", True)
from pnet.cyfuncs import activation_map_pooling as poolf
feature_map_list.append(poolf(feature_map.astype(np.float32), self._num_parts, self._shape, self._strides, relu).astype(np.float16))
result_map = np.vstack(feature_map_list)
result_map.astype(np.float16)
print(np.any(np.isnan(result_map)))
print(np.all(np.isfinite(result_map)))
return result_map
def save_to_dict(self):
d = {}
d['num_parts'] = self._num_parts
d['part_shape'] = self._part_shape
d['settings'] = self._settings
d['training_info'] = self._train_info
d['partition'] = self._partition
d['parts'] = self._parts
d['prune'] = self._prune
d['svms'] = self._svms
d['coef'] = self._coef
d['intercept'] = self._intercept
d['shape'] = self._shape
d['strides'] = self._strides
return d
@classmethod
def load_from_dict(cls, d):
obj = cls(d['num_parts'],d['part_shape'],d['shape'], d['strides'], d['prune'], d['settings'])
obj._train_info = d['training_info']
obj._partition = d['partition']
obj._parts = d['parts']
obj._svms = d['svms']
obj._coef = d['coef']
obj._intercept = d['intercept']
return obj
def calcScore((x, y, svmCoef, svmIntercept, data)):
result = (np.dot(data, svmCoef.T) + svmIntercept.T).astype(np.float16)
if np.any(np.isnan(result)) and (not np.all(np.isfinite(result))):
print ("result is nan")
print(result)
print(data)
print(svmCoef.T)
print(svmIntercept.T)
return x, y, result
def task_spread_svms((quad, i, round_each_proc, part_shape, sample_shape, class1, class2, currentSeeds)):
svm_objects = []
for j in range(round_each_proc * i, round_each_proc * (i + 1)):
#everytime we pick only one location,
#print j
quadIndex_x = quad % 2
x_size = shared_X.shape[1] // 2
quadIndex_y = quad // 2
y_size = shared_X.shape[2] // 2
patches, patches_label = get_color_patches_location(shared_X[:, quadIndex_x * x_size:(quadIndex_x + 1) * x_size,
quadIndex_y * y_size : (quadIndex_y + 1) * y_size, :],
shared_Y, class1[j - i * round_each_proc], class2[j - i * round_each_proc],
locations_per_try=1, part_shape=part_shape,
sample_shape=sample_shape,fr = 1, randomseed=j + currentSeeds,
threshold=0, max_samples=300000)
clf = linear_model.SGDClassifier(alpha=0.001, loss = "log",penalty = 'l2', n_iter=20, shuffle=True,verbose = False,
learning_rate = "optimal", eta0 = 0.0, epsilon=0.1, random_state = None, warm_start=False,
power_t=0.5, l1_ratio=1.0, fit_intercept=True)
clf.fit(patches, patches_label)
print(np.mean(clf.predict(patches) == patches_label))
svm_objects.append(clf)
return svm_objects
def get_color_patches_location(X, Y, class1, class2, locations_per_try, part_shape, sample_shape, fr, randomseed, threshold, max_samples):
assert X.ndim == 4
channel = X.shape[-1]
patches = []
patches_label = []
rs = np.random.RandomState(randomseed)
#th = self._settings['threshold']
th = threshold
w, h = [X.shape[1 + j]-part_shape[j] - sample_shape[j] +2 for j in range(2)]
#print w,h
indices = list(itr.product(range(w-1), range(h-1)))
rs.shuffle(indices)
i_iter = itr.cycle(iter(indices))
count = 0
for trie in range(locations_per_try):
x, y = next(i_iter)
#print "sampled_x"
#print(x,y)
for i in range(X.shape[0]):
Xi = X[i]
# How many patches could we extract?
if(Y[i] not in class1 and Y[i] not in class2):
continue
count+=1
for x_i in range(sample_shape[0]):
for y_i in range(sample_shape[1]):
#print(x_i, y_i)
selection = [slice(x + x_i, x + x_i + part_shape[0]), slice(y + y_i, y+ y_i + part_shape[1])]
patch = Xi[selection]
#edgepatch_nospread = edges_nospread[selection]
if fr == 0:
tot = patch.sum()
else:
tot = abs(patch[fr:-fr,fr:-fr]).sum()
if th <= tot * channel:
patches.append(patch)
if(Y[i] in class1):
patches_label.append(0)
else:
patches_label.append(1)
if len(patches) >= max_samples:
patches = np.asarray(patches)
patches = patches.reshape((patches.shape[0], -1))
return np.asarray(patches),np.asarray(patches_label).astype(np.uint8)
#ag.info('ENDING: {} patches'.format(len(patches)))
patches = np.asarray(patches)
patches = patches.reshape((patches.shape[0], -1))
return np.asarray(patches),np.asarray(patches_label).astype(np.uint8)
def split_2vs2(n):
result = [[],[]]
path = []
findSets(result, path, n, 0)
return result[0], result[1]
def findSets(result, path, n, currentIndex):
import copy
if (len(path) == 4):
print "===="
print(len(path))
print(path)
print(len(result))
currentResult = [[],[]]
currentResult[0] = [path[0], path[0]]
for i in range(1, 4):
currentResult[0][1] = path[i]
currentResult[1] = [k for k in path[1:4] if k!=path[i]]
print(currentResult)
result[0].append(currentResult[0])
result[1].append(currentResult[1])
elif (currentIndex == n):
return
else:
for i in range(currentIndex, n):
newpath = copy.deepcopy(path)
newpath.append(i)
findSets(result, newpath, n, i + 1)
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os

INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""

STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""


def raise_build_error(e):
    # Raise a comprehensible error and list the contents of the
    # directory to help debugging on the mailing list.
    local_dir = os.path.split(__file__)[0]
    msg = STANDARD_MSG
    if local_dir == "sklearn/__check_build":
        # Picking up the local install: this will work only if the
        # install is an 'inplace build'
        msg = INPLACE_MSG
    dir_content = list()
    for i, filename in enumerate(os.listdir(local_dir)):
        if ((i + 1) % 3):
            dir_content.append(filename.ljust(26))
        else:
            dir_content.append(filename + '\n')
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.

If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))


try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
zaxtax/scikit-learn | examples/covariance/plot_outlier_detection.py | 41 | 4216 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates three
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats

from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest

rng = np.random.RandomState(42)

# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]

# define two outlier detection tools to be compared
classifiers = {
    "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
                                     kernel="rbf", gamma=0.1),
    "robust covariance estimator": EllipticEnvelope(contamination=.1),
    "Isolation Forest": IsolationForest(max_samples=n_samples, random_state=rng)}

# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0

# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
    np.random.seed(42)
    # Data generation (integer sizes; passing floats to randn fails on recent numpy)
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
    X = np.r_[X1, X2]
    # Add outliers
    X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]

    # Fit the model
    plt.figure(figsize=(10, 5))
    for i, (clf_name, clf) in enumerate(classifiers.items()):
        # fit the data and tag outliers
        clf.fit(X)
        y_pred = clf.decision_function(X).ravel()
        threshold = stats.scoreatpercentile(y_pred,
                                            100 * outliers_fraction)
        y_pred = y_pred > threshold
        n_errors = (y_pred != ground_truth).sum()
        # plot the levels lines and the points
        Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        subplot = plt.subplot(1, 3, i + 1)
        subplot.set_title("Outlier detection")
        subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
                         cmap=plt.cm.Blues_r)
        a = subplot.contour(xx, yy, Z, levels=[threshold],
                            linewidths=2, colors='red')
        subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
                         colors='orange')
        b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
        c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
        subplot.axis('tight')
        subplot.legend(
            [a.collections[0], b, c],
            ['learned decision function', 'true inliers', 'true outliers'],
            prop=matplotlib.font_manager.FontProperties(size=11))
        subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
        subplot.set_xlim((-7, 7))
        subplot.set_ylim((-7, 7))
    plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)

plt.show()
| bsd-3-clause |
meteoswiss-mdr/precipattractor | pyscripts/radar_statistics.py | 1 | 61766 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
from PIL import Image
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pylab
import numpy as np
import shutil
import datetime
import time
import warnings
from collections import OrderedDict
import pyfftw
from scipy import stats
import scipy.ndimage as ndimage
import pywt
from pyearth import Earth
import cv2
import getpass
usrName = getpass.getuser()
#### Import personal libraries
import time_tools_attractor as ti
import io_tools_attractor as io
import data_tools_attractor as dt
import stat_tools_attractor as st
import optical_flow as of
import maple_ree
import gis_base as gis
################
np.set_printoptions(precision=2)
noData = -999.0
fmt1 = "%.1f"
fmt2 = "%.2f"
fmt3 = "%.3f"
fmt4 = "%.4f"
fmt5 = "%.5f"
########SET DEFAULT ARGUMENTS##########
timeAccumMin = 5
resKm = 1 # To compute FFT frequency
inBaseDir = '/scratch/' + usrName + '/data/' # directory to read from
outBaseDir = '/store/msrad/radar/precip_attractor/data/'
fourierVar = 'dbz' # field on which to perform the fourier analysis ('rainrate' or 'dbz')
scalingBreakArray_KM = [12] #np.arange(6, 42, 2) # [15]
maxBeta1rangeKM = 512
minBeta2rangeKM = 4
fftDomainSize = 512
FFTmod = 'NUMPY' # 'FFTW' or 'NUMPY'
windowFunction = 'none' #'blackman' or 'none'
########GET ARGUMENTS FROM CMD LINE####
parser = argparse.ArgumentParser(description='Compute radar rainfall field statistics.')
parser.add_argument('-start', default='201601310600', type=str,help='Starting date YYYYMMDDHHmmSS.')
parser.add_argument('-end', default='201601310600', type=str,help='Ending date YYYYMMDDHHmmSS.')
parser.add_argument('-product', default='AQC', type=str,help='Which radar rainfall product to use (AQC, CPC, etc).')
parser.add_argument('-plot', default=0, type=int,help='Whether to plot the rainfall fields and the power spectra.')
parser.add_argument('-analysis', nargs='+', default=['autocorr', 'of'], type=str,help='Type of analysis to do (1d, 2d, of, autocorr, wavelets, 1dnoise, 2dnoise).')
parser.add_argument('-wols', default=0, type=int,help='Whether to use weighted ordinary least squares or not in the fitting of the power spectrum.')
parser.add_argument('-minR', default=0.08, type=float,help='Minimum rainfall rate for computation of WAR and various statistics.')
parser.add_argument('-format', default="netcdf", type=str,help='File format for output statistics (netcdf or csv).')
parser.add_argument('-accum', default=5, type=int,help='Accumulation time of the product [minutes].')
parser.add_argument('-temp', default=5, type=int,help='Temporal sampling of the products [minutes].')
args = parser.parse_args()
timeStartStr = args.start
timeEndStr = args.end
boolPlotting = args.plot
product = args.product
weightedOLS = args.wols
timeAccumMin = args.accum
analysis = args.analysis
if set(analysis).issubset(['1d', '2d', 'of', 'autocorr', '2d+autocorr', '1d+2d+autocorr', 'wavelets', '1dnoise', '2dnoise']) == False:
print('You have to ask for a valid analysis [1d, 2d, of, autocorr, 2d+autocorr, 1d+2d+autocorr, wavelets, 1dnoise, 2dnoise]')
sys.exit(1)
if type(scalingBreakArray_KM) != list and type(scalingBreakArray_KM) != np.ndarray:
scalingBreakArray_KM = [scalingBreakArray_KM]
if len(scalingBreakArray_KM) > 1:
variableBreak = 1
else:
variableBreak = 0
if (timeAccumMin == 60) | (timeAccumMin == 60*24):
timeSampMin = timeAccumMin
else:
timeSampMin = args.temp
if args.format == 'netcdf':
strFileFormat = '.nc'
elif args.format == 'csv':
strFileFormat = '.csv'
else:
print('File -format', args.format, ' not valid')
sys.exit(1)
if (int(args.start) > int(args.end)):
print('Time end should be after time start')
sys.exit(1)
if (int(args.start) < 198001010000) or (int(args.start) > 203001010000):
print('Invalid -start or -end time arguments.')
sys.exit(1)
else:
timeStartStr = args.start
timeEndStr = args.end
if (product == 'AQC') or (product == 'CPC'):
print('Computing statistics on ', args.product)
else:
print('Invalid -product argument.')
sys.exit(1)
if fourierVar == 'rainrate':
unitsSpectrum = r"Rainfall field power $\left[ 10\mathrm{log}_{10}\left(\frac{(mm/hr)^2}{km^2}\right)\right]$"
elif fourierVar == 'dbz':
unitsSpectrum = r"Reflectivity field power $\left[ 10\mathrm{log}_{10}\left(\frac{dBZ^2}{km^2}\right)\right]$"
###################################
# Get datetime from timestamp
timeStart = ti.timestring2datetime(timeStartStr)
timeEnd = ti.timestring2datetime(timeEndStr)
timeAccumMinStr = '%05i' % timeAccumMin
timeAccum24hStr = '%05i' % (24*60)
## COLORMAPS
color_list, clevs, clevsStr = dt.get_colorlist('MeteoSwiss') #'STEPS' or 'MeteoSwiss'
cmap = colors.ListedColormap(color_list)
norm = colors.BoundaryNorm(clevs, cmap.N)
cmap.set_over('black',1)
cmapMask = colors.ListedColormap(['black'])
# Load background DEM image
dirDEM = '/users/' + usrName + '/scripts/shapefiles'
fileNameDEM = dirDEM + '/ccs4.png'
isFile = os.path.isfile(fileNameDEM)
if (isFile == False):
print('File: ', fileNameDEM, ' not found.')
else:
print('Reading: ', fileNameDEM)
demImg = Image.open(fileNameDEM)
demImg = dt.extract_middle_domain_img(demImg, fftDomainSize, fftDomainSize)
demImg = demImg.convert('P')
# Limits of CCS4 domain
Xmin = 255000
Xmax = 965000
Ymin = -160000
Ymax = 480000
allXcoords = np.arange(Xmin,Xmax+resKm*1000,resKm*1000)
allYcoords = np.arange(Ymin,Ymax+resKm*1000,resKm*1000)
# Set shapefile filename
fileNameShapefile = dirDEM + '/CHE_adm0.shp'
proj4stringWGS84 = "+proj=longlat +ellps=WGS84 +datum=WGS84"
proj4stringCH = "+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
+k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"
#proj4stringCH = "+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
#+k_0=1 +x_0=2600000 +y_0=1200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"
# Array containing the statistics for one single day
nrFilesDay = 24*(60/timeAccumMin)
##### LOOP OVER FILES ##########################################################
# Rainfall stack
nrValidFields = 0
stackSize = 12
rainfallStack = np.zeros((stackSize,fftDomainSize,fftDomainSize))
waveletStack = [None] * stackSize
# Flow stack
zStack = []
tStack = []
rowStack = []
colStack = []
uStack = []
vStack = []
## Daily arrays to write out
dailyStats = []
dailyU = []
dailyV = []
dailyTimesUV = []
dailyWavelets = []
dailyTimesWavelets = []
tic = time.clock()
timeLocal = timeStart
while timeLocal <= timeEnd:
ticOneImg = time.clock()
# Read in radar image into object
timeLocalStr = ti.datetime2timestring(timeLocal)
r = io.read_gif_image(timeLocalStr, product=product, minR=args.minR, fftDomainSize=fftDomainSize, \
resKm=resKm, timeAccumMin=timeAccumMin, inBaseDir=inBaseDir, noData=noData, cmaptype='MeteoSwiss', domain='CCS4')
hourminStr = ti.get_HHmm_str(timeLocal.hour, timeLocal.minute) # Used to write out data also when there is no valid radar file
minWAR = 0.1
if r.war >= minWAR:
Xmin = r.extent[0]
Xmax = r.extent[1]
Ymin = r.extent[2]
Ymax = r.extent[3]
# Move older rainfall fields down the stack
for s in range(0, rainfallStack.shape[0]-1):
rainfallStack[s+1,:] = rainfallStack[s,:]
# Add last rainfall field on top
rainfallStack[0,:] = r.dBZFourier
# Increment nr of consecutive valid rainfall fields (war >= 0.01)
nrValidFields += 1
########### Compute velocity field ##############
# It will be used to estimate the Lagrangian auto-correlation
if (nrValidFields >= 2) and ('of' in analysis):
print('\t')
ticOF = time.clock()
# extract consecutive images
prvs = rainfallStack[1].copy()
next = rainfallStack[0].copy()
prvs *= 255.0/np.max(prvs)
next *= 255.0/np.max(next)
# 8-bit int
prvs = np.ndarray.astype(prvs,'uint8')
next = np.ndarray.astype(next,'uint8')
# plt.figure()
# plt.imshow(prvs)
# plt.colorbar()
# plt.show()
# remove small noise with a morphological operator (opening)
prvs = of.morphological_opening(prvs, thr=r.zerosDBZ, n=5)
next = of.morphological_opening(next, thr=r.zerosDBZ, n=5)
#+++++++++++ Optical flow parameters
maxCornersST = 500 # Number of corners requested from Shi-Tomasi
qualityLevelST = 0.05
minDistanceST = 5 # Minimum distance between the detected corners
blockSizeST = 15
winsizeLK = 100 # Small windows (e.g. 10) lead to unrealistically high speeds
nrLevelsLK = 0 # Not very sensitive parameter
kernelBandwidth = 100 # Bandwidth of kernel interpolation of vectors
maxSpeedKMHR = 100 # Maximum allowed speed
nrIQRoutlier = 3 # Nr of IQR above median to consider the vector as outlier (if < 100 km/hr)
#++++++++++++++++++++++++++++++++++++
# (1b) Shi-Tomasi good features to track
p0, nCorners = of.ShiTomasi_features_to_track(prvs, maxCornersST, qualityLevel=qualityLevelST, minDistance=minDistanceST, blockSize=blockSizeST)
print("Nr of points OF ShiTomasi =", len(p0))
# (2) Lucas-Kanade tracking
col, row, u, v, err = of.LucasKanade_features_tracking(prvs, next, p0, winSize=(winsizeLK,winsizeLK), maxLevel=nrLevelsLK)
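# Hedged sketch (added for clarity, kept as comments so the processing is not
# changed): the of.ShiTomasi_features_to_track and of.LucasKanade_features_tracking
# helpers are assumed to wrap the standard OpenCV calls roughly as follows.
# Names with a leading underscore are illustrative only.
# _p0 = cv2.goodFeaturesToTrack(prvs, maxCornersST, qualityLevelST, minDistanceST,
# blockSize=blockSizeST)
# _p1, _status, _err = cv2.calcOpticalFlowPyrLK(prvs, next, _p0, None,
# winSize=(winsizeLK, winsizeLK), maxLevel=nrLevelsLK)
# _uv = (_p1 - _p0).reshape(-1, 2) # sparse displacement vectors [pixels per time step]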
# (3) exclude outliers
speed = np.sqrt(u**2 + v**2)
q1, q2, q3 = np.percentile(speed, [25,50,75])
maxspeed = np.min((maxSpeedKMHR/12, q2 + nrIQRoutlier*(q3 - q1)))
minspeed = np.max((0,q2 - 2*(q3 - q1)))
keep = (speed <= maxspeed) # & (speed >= minspeed)
print('Max speed =',np.max(speed)*12)
print('Median speed =',np.percentile(speed,50)*12)
print('Speed threshold =',maxspeed*12)
# Plot histogram of speeds
# plt.close()
# plt.hist(speed*12, bins=30)
# plt.title('min = %1.1f, max = %1.1f' % (minspeed*12,maxspeed*12))
# plt.axvline(x=maxspeed*12)
# plt.xlabel('Speed [km/hr]')
# plt.show()
u = u[keep].reshape(np.sum(keep),1)
v = v[keep].reshape(np.sum(keep),1)
row = row[keep].reshape(np.sum(keep),1)
col = col[keep].reshape(np.sum(keep),1)
# (4) stack vectors within time window
rowStack.append(row)
colStack.append(col)
uStack.append(u)
vStack.append(v)
# convert lists of arrays into single arrays
row = np.vstack(rowStack)
col = np.vstack(colStack)
u = np.vstack(uStack)
v = np.vstack(vStack)
if (nrValidFields >= 4):
colStack.pop(0)
rowStack.pop(0)
uStack.pop(0)
vStack.pop(0)
# (1) decluster sparse motion vectors
col, row, u, v = of.declustering(col, row, u, v, R = 20, minN = 3)
print("Nr of points OF after declustering =", len(row))
# (2) kernel interpolation
domainSize = [fftDomainSize, fftDomainSize]
colgrid, rowgrid, U, V, b = of.interpolate_sparse_vectors_kernel(col, row, u, v, domainSize, b = kernelBandwidth)
print('Kernel bandwith =',b)
# Add U,V fields to daily collection
dailyU.append(U)
dailyV.append(-V) # Reverse V orientation (South -> North)
dailyTimesUV.append(timeLocalStr)
# Compute advection
# resize motion fields by factor f (for advection)
f = 0.5
if (f<1):
Ures = cv2.resize(U, (0,0), fx=f, fy=f)
Vres = cv2.resize(V, (0,0), fx=f, fy=f)
else:
Ures = U
Vres = V
tocOF = time.clock()
# Call MAPLE routine for advection
net = 1
rainfield_lag1 = maple_ree.ree_epol_slio(rainfallStack[1], Vres, Ures, net)
# Call MAPLE routine for advection over several time stamps
# net = np.min([12, nrValidFields])
# for lag in range(2,net):
# rainfield_advected = maple_ree.ree_epol_slio(rainfallStack[2], Vres, Ures, net)
# plt.close()
# plt.subplot(121)
# plt.imshow(rainfallStack[1], vmin=8, vmax=55)
# plt.subplot(122)
# plt.imshow(rainfield_lag1[:,:,-1], vmin=8, vmax=55)
# plt.show()
# sys.exit()
# Resize vector fields for plotting
xs, ys, Us, Vs = of.reduce_field_density_for_plotting(colgrid, rowgrid, U, V, 25)
# Plot vectors to check if correct
# plt.quiver(xs, ys, Us, Vs)
# plt.show()
print('Elapsed time OF: ', tocOF - ticOF, ' seconds.')
print('\t')
########### Compute Wavelet transform ###########
if 'wavelets' in analysis:
wavelet = 'haar'
w = pywt.Wavelet(wavelet)
#print(w)
# Upscale field in rainrate
wavelet_coeff = st.wavelet_decomposition_2d(r.rainrate, wavelet, nrLevels = None)
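# Hedged sketch (added for clarity, kept as comments): st.wavelet_decomposition_2d
# is assumed to return the low-pass approximation of the rain field at each
# wavelet scale, along the lines of a standard pywt multilevel decomposition.
# Names with a leading underscore are illustrative only.
# _coeffs = pywt.wavedec2(r.rainrate, wavelet, level=3) # [cA3, (cH3,cV3,cD3), ..., (cH1,cV1,cD1)]
# _coarsest_approx = _coeffs[0] # low-pass approximation, resolution halved at every level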
# Transform into dBZ
for level in range(0,len(wavelet_coeff)):
wavelet_coeff[level],_,_ = dt.rainrate2reflectivity(wavelet_coeff[level])
# Generate coordinates of centers of wavelet coefficients
xvecs, yvecs = st.generate_wavelet_coordinates(wavelet_coeff, r.dBZFourier.shape, Xmin, Xmax, Ymin, Ymax, resKm*1000)
# Append a given wavelet scale to write out into daily netCDF files
scaleKm_asked = 8
scale2keep = st.get_level_from_scale(resKm, scaleKm_asked)
scaleKm = xvecs[scale2keep][1] - xvecs[scale2keep][0]
scaleKm = int(scaleKm/1000)
if scaleKm_asked != scaleKm:
print('Asked and returned wavelet scales not matching.', scaleKm_asked, 'vs', scaleKm)
sys.exit()
else:
print('Wavelet scale = ', scaleKm, 'km')
dailyWavelets.append(wavelet_coeff[scale2keep])
dailyTimesWavelets.append(timeLocalStr)
# # Write out wavelet coefficients to netCDF file
# # Keep only large scales (smaller file size)
# wavelet_coeff_image = wavelet_coeff[1:]
# analysisType = 'WAVELET'
# fileNameWavelet,_,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal, product, \
# timeAccumMin=timeAccumMin, quality=0, format='netcdf')
# io.write_netcdf_waveletcoeffs(fileNameWavelet, timeLocalStr, \
# xvecs, yvecs, wavelet_coeff_image, waveletType = wavelet)
# print('Saved:', fileNameWavelet)
## Add wavelet coeffs to the stack
for s in range(0, len(waveletStack)-1):
waveletStack[s+1] = waveletStack[s]
waveletStack[0] = wavelet_coeff
# # Full wavelet decomposition to get also the HDV residual components
waveletHVD = False
nrLevels = 6
if waveletHVD:
coeffs = pywt.wavedec2(r.dBZFourier, w, level=nrLevels)
#cA2, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs
cA2 = coeffs[0]
# ###### Use wavelets to generate a field of correlated noise
waveletNoise = False
level2perturb = [3,4,5]
nrMembers = 3
if waveletNoise:
# Generate white noise at a given level
stochasticEnsemble = st.generate_wavelet_noise(r.dBZFourier, w, nrLevels, level2perturb, nrMembers)
########### Compute Fourier power spectrum ###########
ticFFT = time.clock()
minFieldSize = np.min(fftDomainSize)
# Replace zeros with the lowest rainfall threshold (to obtain better beta2 estimations)
if fourierVar == 'rainrate':
rainfieldFourier = r.rainrate
rainfieldFourier[rainfieldFourier < args.minR] = args.minR
if fourierVar == 'dbz':
rainfieldFourier = r.dBZFourier
zerosDBZ,_,_ = dt.rainrate2reflectivity(args.minR)
# Method 1: Set the zeros to the dBZ threshold
# rainfieldFourier[rainfieldFourier < zerosDBZ] = zerosDBZ
# Method 2: Remove the dBZ threshold to all data
rainfieldFourier = rainfieldFourier - zerosDBZ
# plt.imshow(rainfieldFourier)
# plt.colorbar()
# plt.show()
# Compute 2D power spectrum
psd2d, freqAll = st.compute_2d_spectrum(rainfieldFourier, resolution=resKm, window=None, FFTmod='NUMPY')
# Compute autocorrelation using inverse FFT of spectrum
if ('autocorr' in analysis) or ('1d' in analysis) or ('2d+autocorr' in analysis) or ('1d+2d+autocorr' in analysis) or ('wavelets' in analysis):
# Compute autocorrelation
autocorr,_,_,_ = st.compute_autocorrelation_fft2(rainfieldFourier, FFTmod = 'NUMPY')
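# Hedged sketch (added for clarity, kept as comments so the analysis is not
# changed): st.compute_autocorrelation_fft2 is assumed to follow the
# Wiener-Khinchin relation, i.e. the spatial autocorrelation is the inverse
# FFT of the power spectrum of the mean-removed field.
# Names with a leading underscore are illustrative only.
# _f = rainfieldFourier - np.mean(rainfieldFourier)
# _psd = np.abs(np.fft.fft2(_f)) ** 2
# _ac = np.fft.fftshift(np.real(np.fft.ifft2(_psd)))
# _ac = _ac / _ac.max() # normalise so that the zero-lag value equals 1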
# Compute anisotropy from autocorrelation function
autocorrSizeSub = 255
percentileZero = 90
autocorrSub, eccentricity_autocorr, orientation_autocorr, xbar_autocorr, ybar_autocorr, eigvals_autocorr, eigvecs_autocorr, percZero_autocorr,_ = st.compute_fft_anisotropy(autocorr, autocorrSizeSub, percentileZero, rotation=False)
if ('2d' in analysis) or ('2d+autocorr' in analysis) or ('1d+2d+autocorr' in analysis) or ('wavelets' in analysis):
cov2logPS = True # Whether to compute the anisotropy on the log of the 2d PS
# Extract central region of 2d power spectrum and compute covariance
if cov2logPS:
psd2d_anis = 10.0*np.log10(psd2d)
else:
psd2d_anis = np.copy(psd2d)
# Compute anisotropy from FFT spectrum
fftSizeSub = 40#255
percentileZero = 90
smoothing_sigma = 3
psd2dsub, eccentricity_ps, orientation_ps, xbar_ps, ybar_ps, eigvals_ps, eigvecs_ps, percZero_ps, psd2dsubSmooth = st.compute_fft_anisotropy(psd2d_anis, fftSizeSub, percentileZero, sigma = smoothing_sigma)
print(percentileZero,'- percentile = ', percZero_ps)
# Compute 1D radially averaged power spectrum
psd1d, freq, wavelengthKm = st.compute_radialAverage_spectrum(psd2d, resolution=resKm)
############ Compute spectral slopes Beta
r_beta1_best = 0
r_beta2_best = 0
for s in range(0,len(scalingBreakArray_KM)):
scalingBreak_KM = scalingBreakArray_KM[s]
largeScalesLims = np.array([maxBeta1rangeKM, scalingBreak_KM])
smallScalesLims = np.array([scalingBreak_KM, minBeta2rangeKM])
idxBeta1 = (wavelengthKm <= largeScalesLims[0]) & (wavelengthKm > largeScalesLims[1]) # large scales
idxBeta2 = (wavelengthKm <= smallScalesLims[0]) & (wavelengthKm > smallScalesLims[1]) # small scales
idxBetaBoth = (wavelengthKm <= largeScalesLims[0]) & (wavelengthKm > smallScalesLims[1]) # all scales
#print('Nr points beta1 = ', np.sum(idxBeta1))
#print('Nr points beta2 = ', np.sum(idxBeta2))
#io.write_csv('/users/' + usrName + '/results/ps_marco.csv', ['freq','psd'], np.asarray([freq,psd1d]).T.tolist())
# Compute betas using OLS
if weightedOLS == 0:
beta1, intercept_beta1, r_beta1 = st.compute_beta_sm(10*np.log10(freq[idxBeta1]),10*np.log10(psd1d[idxBeta1]))
beta2, intercept_beta2, r_beta2 = st.compute_beta_sm(10*np.log10(freq[idxBeta2]), 10*np.log10(psd1d[idxBeta2]))
elif weightedOLS == 1:
# Compute betas using weighted OLS
linWeights = len(freq[idxBeta1]) - np.arange(len(freq[idxBeta1]))
#logWeights = 10*np.log10(linWeights)
logWeights = linWeights
beta1, intercept_beta1,r_beta1 = st.compute_beta_sm(10*np.log10(freq[idxBeta1]), 10*np.log10(psd1d[idxBeta1]), logWeights)
linWeights = len(freq[idxBeta2]) - np.arange(len(freq[idxBeta2]))
#logWeights = 10*np.log10(linWeights)
logWeights = linWeights
beta2, intercept_beta2, r_beta2 = st.compute_beta_sm(10*np.log10(freq[idxBeta2]), 10*np.log10(psd1d[idxBeta2]), logWeights)
else:
print("Please set weightedOLS either to 0 or 1")
sys.exit(1)
# Select best fit based on scaling break
if np.abs(r_beta1 + r_beta2) > np.abs(r_beta1_best + r_beta2_best):
r_beta1_best = r_beta1
r_beta2_best = r_beta2
beta1_best = beta1
intercept_beta1_best = intercept_beta1
beta2_best = beta2
intercept_beta2_best = intercept_beta2
scalingBreak_best = scalingBreak_KM
smallScalesLims_best = smallScalesLims
largeScalesLims_best = largeScalesLims
scalingBreak_Idx = idxBeta2[0]
r_beta1 = r_beta1_best
r_beta2 = r_beta2_best
beta1 = beta1_best
beta2 = beta2_best
intercept_beta1 = intercept_beta1_best
intercept_beta2 = intercept_beta2_best
smallScalesLims = smallScalesLims_best
largeScalesLims = largeScalesLims_best
if variableBreak == 1:
print("Best scaling break corr. = ", scalingBreak_best, ' km')
else:
print("Fixed scaling break = ", scalingBreak_best, ' km')
#### Fitting spectral slopes with MARS (Multivariate Adaptive Regression Splines)
useMARS = False
if useMARS:
model = Earth(max_degree = 1, max_terms = 2)
model.fit(dt.to_dB(freq[idxBetaBoth]), dt.to_dB(psd1d[idxBetaBoth]))
mars_fit = model.predict(dt.to_dB(freq[idxBetaBoth]))
# plt.scatter(dt.to_dB(freq),dt.to_dB(psd1d))
# plt.plot(dt.to_dB(freq[idxBetaBoth]), mars_fit)
# plt.show()
# print(model.trace())
# print(model.summary())
# print(model.basis_)
# print(model.coef_[0])
#y_prime_hat = model.predict_deriv(dt.to_dB(freq[idxBetaBoth]), 'x6')
scalingBreak_MARS = str(model.basis_[2])[2:7]
scalingBreak_MARS_KM = 1.0/dt.from_dB(float(scalingBreak_MARS))
print("Best scaling break MARS = ", scalingBreak_MARS_KM, ' km')
tocFFT = time.clock()
#print('FFT time: ', tocFFT-ticFFT, ' seconds.')
##################### COMPUTE SUMMARY STATS #####################################
# Compute field statistics in rainfall units
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
rainmean = np.nanmean(r.rainrate.ravel())
rainstd = np.nanstd(r.rainrate.ravel())
raincondmean = np.nanmean(r.rainrateNans.ravel())
raincondstd = np.nanstd(r.rainrateNans.ravel())
# Compute field statistics in dBZ units
dBZmean = np.nanmean(r.dBZ.ravel())
dBZstd = np.nanstd(r.dBZ.ravel())
dBZcondmean = np.nanmean(r.dBZNans.ravel())
dBZcondstd = np.nanstd(r.dBZNans.ravel())
# Compute Eulerian Auto-correlation
if (nrValidFields >= 2) and ('of' in analysis):
corr_eul_lag1 = np.corrcoef(rainfallStack[0,:].flatten(), rainfallStack[1,:].flatten())
corr_eul_lag1 = corr_eul_lag1[0,1]
print("Eulerian correlation =", fmt3 % corr_eul_lag1)
# Compute Eulerian correlation at each wavelet coeff level
# corr_eul_wavelet_levels = []
# for level in range(0,len(wavelet_coeff)):
# corr_eul_level = np.corrcoef(np.array(waveletStack[0][level]).flatten(), np.array(waveletStack[1][level]).flatten())
# corr_eul_level = corr_eul_level[0,1]
# corr_eul_wavelet_levels.append(corr_eul_level)
# print(corr_eul_wavelet_levels)
# plt.figure()
# plt.scatter(rainfallStack[0,:], rainfallStack[1,:])
# plt.show()
else:
corr_eul_lag1 = np.nan
# Compute Lagrangian auto-correlation
if (nrValidFields >= 2) and ('of' in analysis):
corr_lagr_lag1 = np.corrcoef(rainfield_lag1.flatten(), rainfallStack[0,:].flatten())
corr_lagr_lag1 = corr_lagr_lag1[0,1]
print("Lagrangian correlation =", fmt3 % corr_lagr_lag1)
print("Diff. Lagr-Eul correlation =", fmt3 % (corr_lagr_lag1 - corr_eul_lag1))
# plt.figure()
# plt.scatter(rainfallStack[0,:], rainfallStack[1,:])
# plt.show()
corr_lagr_lags = []
for lag in range(1,net):
corr_lagr = np.corrcoef(rainfield_advected[lag].flatten(), rainfallStack[0,:].flatten())
corr_lagr_lags.append(corr_lagr[0,1])
print('Lagrangian correlation lags =', corr_lagr_lags)
else:
corr_lagr_lag1 = np.nan
################### COLLECT DAILY STATS
timeStampStr = ti.datetime2timestring(timeLocal)
# Headers
headers = ['time', 'alb', 'dol', 'lem', 'ppm', 'wei', 'war', 'r_mean', 'r_std', 'r_cmean', 'r_cstd',
'dBZ_mean', 'dBZ_std', 'dBZ_cmean', 'dBZ_cstd',
'beta1', 'corr_beta1', 'beta2', 'corr_beta2' , 'scaling_break', 'eccentricity', 'orientation',
'corr_eul_lag1', 'corr_lagr_lag1']
if '2d' in analysis:
eccentricity = eccentricity_ps
orientation = orientation_ps
else:
eccentricity = eccentricity_autocorr
orientation = orientation_autocorr
# Data
instantStats = [timeStampStr,
str(r.alb),
str(r.dol),
str(r.lem),
str(r.ppm),
str(r.wei),
fmt4 % r.war,
fmt5 % rainmean,
fmt5 % rainstd,
fmt5 % raincondmean,
fmt5 % raincondstd,
fmt4 % dBZmean,
fmt4 % dBZstd,
fmt4 % dBZcondmean,
fmt4 % dBZcondstd,
fmt4 % beta1,
fmt4 % r_beta1,
fmt4 % beta2,
fmt4 % r_beta2,
int(scalingBreak_best),
fmt4 % eccentricity,
fmt4 % orientation,
fmt4 % corr_eul_lag1,
fmt4 % corr_lagr_lag1
]
print('+++++++ Radar statistics +++++++')
outputPrint = OrderedDict(zip(headers, instantStats))
print(outputPrint)
print('++++++++++++++++++++++++++++++++')
# Append statistics to daily array
dailyStats.append(instantStats)
######################## PLOT WAVELETS ######################
if 'wavelets' in analysis and boolPlotting:
if waveletNoise:
nrRows,nrCols = dt.optimal_size_subplot(nrMembers+1)
# Adjust figure parameters
ratioFig = nrCols/nrRows
figWidth = 14
colorbar = 'off'
fig = plt.figure(figsize=(ratioFig*figWidth,figWidth))
padding = 0.01
plt.subplots_adjust(hspace=0.05, wspace=0.01)
mpl.rcParams['image.interpolation'] = 'nearest'
# Plot rainfield
plt.subplot(nrRows, nrCols, 1)
PC = plt.imshow(r.dBZFourier, vmin=15, vmax=45)
plt.title('Rainfield [dBZ]',fontsize=15)
plt.axis('off')
# Plot stochastic ensemble
for member in range(0, nrMembers):
plt.subplot(nrRows, nrCols, member+2)
plt.imshow(stochasticEnsemble[member],vmin=15, vmax=45)
plt.title('Member '+ str(member+1), fontsize=15)
plt.axis('off')
plt.suptitle('Stochastic ensemble based on wavelet type: ' + wavelet + '\n by perturbing levels ' + str(level2perturb), fontsize=20)
stringFigName = '/users/lforesti/results/' + product + r.yearStr + r.julianDayStr + r.hourminStr + '-' + wavelet + '-waveletEnsemble_' + timeAccumMinStr + '.png'
plt.savefig(stringFigName, dpi=300)
print(stringFigName, ' saved.')
plt.close()
if waveletHVD:
# Plot of all the horizontal, diagonal and vertical components of the wavelet transform
pltWavelets = ['H','V','D']
nrPlots = (len(coeffs)-1)*len(pltWavelets)+2
mpl.rcParams['image.interpolation'] = 'none'
nrRows,nrCols = dt.optimal_size_subplot(nrPlots)
print('Nr. plots = ' + str(nrPlots), ' in ', str(nrRows), 'x', str(nrCols))
# Adjust figure parameters
ratioFig = nrCols/nrRows
figWidth = 14
colorbar = 'off'
fig = plt.figure(figsize=(ratioFig*figWidth,figWidth))
padding = 0.01
plt.subplots_adjust(hspace=0.05, wspace=0.01)
###
# Plot rainfield
ax1 = plt.subplot(nrRows, nrCols, 1)
PC = plt.imshow(r.dBZFourier, vmin=15, vmax=45)
plt.title('Rainfield [dBZ]')
plt.axis('off')
# Colorbar
if colorbar == 'on':
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=padding)
cbar = plt.colorbar(PC, cax = cax1)
nplot = 2
for level in range(1,nrLevels+1):
for p in range(0,len(pltWavelets)):
waveletLevel = nrLevels+1 - level
# Plot wavelet coefficients for horizontal/vertical/diagonal components
var = coeffs[waveletLevel][p]
minimum = np.percentile(var, 1)
maximum = np.percentile(var, 99)
ax1 = plt.subplot(nrRows, nrCols, nplot)
PC = plt.imshow(var, vmin=minimum, vmax=maximum, aspect=var.shape[1]/var.shape[0])
if p == 0:
titleStr = 'Level ' + str(level) + ' - horizontal'
if p == 1:
titleStr = 'Level ' + str(level) + ' - vertical'
if p == 2:
titleStr = 'Level ' + str(level) + ' - diagonal'
plt.title(titleStr)
plt.axis('off')
# Colorbar
if colorbar == 'on':
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=padding)
cbar = plt.colorbar(PC, cax = cax1)
nplot = nplot + 1
# Plot approximation at last scale
minimum = np.percentile(cA2, 1)
maximum = np.percentile(cA2, 99)
ax1 = plt.subplot(nrRows, nrCols, nplot)
PC = plt.imshow(cA2, aspect=cA2.shape[1]/cA2.shape[0])
plt.title('Approximation')
plt.axis('off')
# Colorbar
if colorbar == 'on':
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=padding)
cbar = plt.colorbar(PC, cax = cax1)
plt.suptitle('Wavelet type: ' + wavelet, fontsize=20)
#plt.show()
waveletDirs = "".join(pltWavelets)
stringFigName = '/users/lforesti/results/' + product + r.yearStr + r.julianDayStr \
+ r.hourminStr + '-' + wavelet + '-wavelet_' + waveletDirs + '_' + timeAccumMinStr + '.png'
plt.savefig(stringFigName, dpi=300)
print(stringFigName, ' saved.')
###### Plots of the wavelet approximation at each scale
nrPlots = len(wavelet_coeff)
nrRows,nrCols = dt.optimal_size_subplot(nrPlots)
fig = plt.figure()
ax = fig.add_axes()
ax = fig.add_subplot(111)
for scale in range(1,nrPlots+1):
plt.subplot(nrRows, nrCols, scale)
im = plt.imshow(wavelet_coeff[scale-1], vmin=r.dbzThreshold, vmax=50, interpolation='nearest')
if scale == nrPlots:
scaleKm_l = (xvecs[scale-2][1] - xvecs[scale-2][0])*2
else:
scaleKm_l = xvecs[scale-1][1] - xvecs[scale-1][0]
scaleKm_l = int(scaleKm_l/1000)
titleStr = 'Scale = ' + str(scaleKm_l) + ' km'
plt.title(titleStr, fontsize=12)
plt.axis('off')
fig.tight_layout()
fig.subplots_adjust(top=0.92, right=0.8)
cbar_ax = fig.add_axes([0.90, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.suptitle('Low pass wavelet decomposition', fontsize=15)
stringFigName = '/users/lforesti/results/' + product + r.yearStr + r.julianDayStr \
+ r.hourminStr + '-' + wavelet + '-waveletApprox_' + timeAccumMinStr + '.png'
plt.savefig(stringFigName, dpi=300)
print(stringFigName, ' saved.')
################ PLOTTING RAINFIELD #################################
# ++++++++++++
if boolPlotting:
titlesSize = 20
labelsSize = 18
ticksSize = 16
unitsSize=14
colorbarTicksSize=14
mpl.rcParams['xtick.labelsize'] = ticksSize
mpl.rcParams['ytick.labelsize'] = ticksSize
plt.close("all")
analysisFFT = []
for i in range(0,len(analysis)):
if (analysis[i] == '1d') or (analysis[i] == '2d') or (analysis[i] == 'autocorr') or (analysis[i] == '1d+2d+autocorr') or (analysis[i] == '2dnoise') or (analysis[i] == '2d+autocorr'):
analysisFFT.append(analysis[i])
# Loop over different analyses (1d, 2d autocorr)
for an in analysisFFT:
if an == '1d+2d+autocorr':
fig = plt.figure(figsize=(18,18))
elif an == '2d+autocorr':
fig = plt.figure(figsize=(8.3,20))
else:
fig = plt.figure(figsize=(16,7.5))
ax = fig.add_axes()
ax = fig.add_subplot(111)
if an == '1d+2d+autocorr':
rainAx = plt.subplot(221)
elif an == '2d+autocorr':
rainAx = plt.subplot(311)
else:
rainAx = plt.subplot(121)
# Draw DEM
rainAx.imshow(demImg, extent = r.extent, vmin=100, vmax=3000, cmap = plt.get_cmap('gray'))
# Draw rainfield
rainIm = rainAx.imshow(r.rainrateNans, extent = r.extent, cmap=cmap, norm=norm, interpolation='nearest')
# Draw shapefile
gis.read_plot_shapefile(fileNameShapefile, proj4stringWGS84, proj4stringCH, ax=rainAx, linewidth = 0.75)
if (nrValidFields >= 2) and ('of' in analysis):
ycoord_flipped = fftDomainSize-1-ys
plt.quiver(Xmin+xs*1000, Ymin+ycoord_flipped*1000, Us, -Vs, angles = 'xy', scale_units='xy')
#plt.quiver(Xmin+x*1000, Ymin+ycoord_flipped*1000, u, -v, angles = 'xy', scale_units='xy')
# Colorbar
cbar = plt.colorbar(rainIm, ticks=clevs, spacing='uniform', norm=norm, extend='max', fraction=0.04)
cbar.ax.tick_params(labelsize=colorbarTicksSize)
cbar.set_ticklabels(clevsStr, update_ticks=True)
if (timeAccumMin == 1440):
cbar.ax.set_title(" mm/day",fontsize=unitsSize)
elif (timeAccumMin == 60):
cbar.ax.set_title(" mm/h",fontsize=unitsSize)
elif (timeAccumMin == 5):
if an == '2d+autocorr':
cbar.set_label(r"mm h$^{-1}$",fontsize=unitsSize)
else:
cbar.ax.set_title(r" mm hr$^{-1}$",fontsize=unitsSize)
else:
print('Accum. units not defined.')
#cbar.ax.xaxis.set_label_position('top')
# # Set ticks for dBZ on the other side
# ax2 =plt.twinx(ax=cbar.ax)
# dBZlimits,_,_ = dt.rainrate2reflectivity(clevs,A,b)
# dBZlimits = np.round(dBZlimits)
# ax2.set_ylim(-10, 10)
# ax2.set_yticklabels(dBZlimits)
titleStr = timeLocal.strftime("%Y.%m.%d %H:%M") + ', ' + product + ' rainfall field, Q' + str(r.dataQuality)
titleStr = 'Radar rainfall field on ' + timeLocal.strftime("%Y.%m.%d %H:%M")
plt.title(titleStr, fontsize=titlesSize)
# Draw radar composite mask
rainAx.imshow(r.mask, cmap=r.cmapMask, extent = r.extent, alpha = 0.5)
# Add product quality within image
dataQualityTxt = "Quality = " + str(r.dataQuality)
if (an == 'of'):
plt.text(-0.15,-0.12, "Eulerian correlation = " + fmt3 % corr_eul_lag1, transform=rainAx.transAxes)
plt.text(-0.15,-0.15, "Lagrangian correlation = " + fmt3 % corr_lagr_lag1, transform=rainAx.transAxes)
diffPercEulLagr = (corr_lagr_lag1 - corr_eul_lag1)*100
plt.text(-0.15,-0.18, "Difference Lagr/Eul = " + fmt2 % diffPercEulLagr + ' %', transform=rainAx.transAxes)
# Set X and Y ticks for coordinates
xticks = np.arange(400, 900, 100)
yticks = np.arange(0, 500 ,100)
plt.xticks(xticks*1000, xticks)
plt.yticks(yticks*1000, yticks)
plt.xlabel('Swiss Easting [km]', fontsize=labelsSize)
plt.ylabel('Swiss Northing [km]', fontsize=labelsSize)
#################### PLOT SPECTRA ###########################################################
#++++++++++++ Draw 2d power spectrum
if (an == '2d') | (an == '2dnoise') | (an == '2d+autocorr') | (an == '1d+2d+autocorr'):
if an == '1d+2d+autocorr':
psAx2 = plt.subplot(222)
elif an == '2d+autocorr':
psAx2 = plt.subplot(312)
else:
psAx2 = plt.subplot(122)
if fourierVar == 'rainrate':
psLims =[-50,40]
if fourierVar == 'dbz':
psLims = [-20,70]
extentFFT = (-minFieldSize/2,minFieldSize/2,-minFieldSize/2,minFieldSize/2)
if (an == '2d') | (an == '2d+autocorr') | (an == '1d+2d+autocorr'):
# Smooth 2d PS for plotting contours
if cov2logPS == False:
psd2dsubSmooth = 10.0*np.log10(psd2dsubSmooth)
# Plot image of 2d PS
#psAx2.invert_yaxis()
clevsPS = np.arange(-5,70,5)
cmapPS = plt.get_cmap('nipy_spectral', clevsPS.shape[0]) #nipy_spectral, gist_ncar
normPS = colors.BoundaryNorm(clevsPS, cmapPS.N-1)
cmapPS.set_over('white',1)
# Compute alpha transparency vector
#cmapPS._init()
#cmapPS._lut[clevsPS <= percZero,-1] = 0.5
if cov2logPS:
imPS = psAx2.imshow(psd2dsub, interpolation='nearest', cmap=cmapPS, norm=normPS)
else:
imPS = psAx2.imshow(10.0*np.log10(psd2dsub), interpolation='nearest', cmap=cmapPS, norm=normPS)
# Plot smooth contour of 2d PS
# percentiles = [70,80,90,95,98,99,99.5]
# levelsPS = np.array(st.percentiles(psd2dsubSmooth, percentiles))
# print("Contour levels quantiles: ",percentiles)
# print("Contour levels 2d PS : ", levelsPS)
# if np.sum(levelsPS) != 0:
# im1 = psAx2.contour(psd2dsubSmooth, levelsPS, colors='black', alpha=0.25)
# im1 = psAx2.contour(psd2dsubSmooth, [percZero], colors='black', linestyles='dashed')
# Plot major and minor axis of anisotropy
#st.plot_bars(xbar_ps, ybar_ps, eigvals_ps, eigvecs_ps, psAx2, 'red')
#plt.text(0.05, 0.95, 'eccentricity = ' + str(fmt2 % eccentricity_ps), transform=psAx2.transAxes, backgroundcolor = 'w', fontsize=14)
#plt.text(0.05, 0.90, 'orientation = ' + str(fmt2 % orientation_ps) + '$^\circ$', transform=psAx2.transAxes,backgroundcolor = 'w', fontsize=14)
# Create ticks in km
ticks_loc = np.arange(0,2*fftSizeSub,1)
# List of ticks for X and Y (reference from top)
ticksListX = np.hstack((np.flipud(-resKm/freq[1:fftSizeSub+1]),0,resKm/freq[1:fftSizeSub])).astype(int)
ticksListY = np.flipud(ticksListX)
# List of indices where to display the ticks
if fftSizeSub <= 20:
idxTicksX = np.hstack((np.arange(0,fftSizeSub-1,2),fftSizeSub-1,fftSizeSub+1,np.arange(fftSizeSub+2,2*fftSizeSub,2))).astype(int)
idxTicksY = np.hstack((np.arange(1,fftSizeSub-2,2),fftSizeSub-2,fftSizeSub,np.arange(fftSizeSub+1,2*fftSizeSub,2))).astype(int)
else:
idxTicksX = np.hstack((np.arange(1,fftSizeSub-2,4),fftSizeSub-1,fftSizeSub+1,np.arange(fftSizeSub+3,2*fftSizeSub,4))).astype(int)
idxTicksY = np.hstack((np.arange(0,fftSizeSub-3,4),fftSizeSub-2,fftSizeSub,np.arange(fftSizeSub+2,2*fftSizeSub,4))).astype(int)
plt.xticks(rotation=90)
psAx2.set_xticks(ticks_loc[idxTicksX])
psAx2.set_xticklabels(ticksListX[idxTicksX], fontsize=13)
psAx2.set_yticks(ticks_loc[idxTicksY])
psAx2.set_yticklabels(ticksListY[idxTicksY], fontsize=13)
plt.xlabel('Wavelength [km]', fontsize=labelsSize)
plt.ylabel('Wavelength [km]', fontsize=labelsSize)
#plt.gca().invert_yaxis()
else:
#plt.contourf(10*np.log10(psd2dnoise), 20, vmin=-15, vmax=0)
imPS = plt.imshow(10*np.log10(psd2dnoise), extent=(extentFFT[0], extentFFT[1], extentFFT[2], extentFFT[3]), vmin=-15, vmax=0)
plt.gca().invert_yaxis()
cbar = plt.colorbar(imPS, ticks=clevsPS, spacing='uniform', norm=normPS, extend='max', fraction=0.04)
cbar.ax.tick_params(labelsize=colorbarTicksSize)
cbar.set_label(unitsSpectrum, fontsize=unitsSize)
#cbar.ax.set_title(unitsSpectrum, fontsize=unitsSize)
titleStr = '2D power spectrum (rotated by 90$^\circ$)'
plt.title(titleStr, fontsize=titlesSize)
#++++++++++++ Draw autocorrelation function
if (an == 'autocorr') | (an == '2d+autocorr') | (an == '1d+2d+autocorr'):
if an == '1d+2d+autocorr':
autocorrAx = plt.subplot(223)
elif an == '2d+autocorr':
autocorrAx = plt.subplot(313)
else:
autocorrAx = plt.subplot(122)
maxAutocov = np.max(autocorrSub)
if maxAutocov > 50:
clevsPS = np.arange(0,200,10)
elif maxAutocov > 10:
clevsPS = np.arange(0,50,5)
else:
clevsPS = np.arange(-0.05,1.05,0.05)
clevsPSticks = np.arange(-0.1,1.1,0.1)
cmapPS = plt.get_cmap('nipy_spectral', clevsPS.shape[0]) #nipy_spectral, gist_ncar
normPS = colors.BoundaryNorm(clevsPS, cmapPS.N)
cmaplist = [cmapPS(i) for i in range(cmapPS.N)]
# force the first color entry to be white
#cmaplist[0] = (1,1,1,1.0)
# Create the new map
cmapPS = cmapPS.from_list('Custom cmap', cmaplist, cmapPS.N)
cmapPS.set_under('white',1)
ext = (-autocorrSizeSub, autocorrSizeSub, -autocorrSizeSub, autocorrSizeSub)
imAC = autocorrAx.imshow(autocorrSub, cmap=cmapPS, norm=normPS, extent = ext)
#cbar = plt.colorbar(imAC, ticks=clevsPS, spacing='uniform', norm=normPS, extend='max', fraction=0.03)
cbar = plt.colorbar(imAC, ticks=clevsPSticks, spacing='uniform', extend='min', norm=normPS,fraction=0.04)
cbar.ax.tick_params(labelsize=colorbarTicksSize)
cbar.set_label('correlation coefficient', fontsize=unitsSize)
im1 = autocorrAx.contour(np.flipud(autocorrSub), clevsPS, colors='black', alpha = 0.25, extent = ext)
im1 = autocorrAx.contour(np.flipud(autocorrSub), [percZero_autocorr], colors='black', linestyles='dashed', extent = ext)
# Plot major and minor axis of anisotropy
xbar_autocorr = xbar_autocorr - autocorrSizeSub
ybar_autocorr = ybar_autocorr - autocorrSizeSub
# Reverse sign of second dimension for plotting
eigvecs_autocorr[1,:] = -eigvecs_autocorr[1,:]
st.plot_bars(xbar_autocorr, ybar_autocorr, eigvals_autocorr, eigvecs_autocorr, autocorrAx, 'red')
# autocorrAx.invert_yaxis()
# autocorrAx.axis('image')
if an == '2d+autocorr':
xoffset = 0.05
yoffset = 0.93
yspace = 0.04
eccFontSize = 12
else:
xoffset = 0.05
yoffset = 0.95
yspace = 0.05
eccFontSize = 14
plt.text(xoffset, yoffset, 'eccentricity = ' + str(fmt2 % eccentricity_autocorr), transform=autocorrAx.transAxes, backgroundcolor = 'w', fontsize=eccFontSize)
plt.text(xoffset, yoffset-yspace, 'orientation = ' + str(fmt2 % orientation_autocorr) + r'$^\circ$', transform=autocorrAx.transAxes, backgroundcolor='w', fontsize=eccFontSize)
plt.xticks(rotation=90)
autocorrAx.set_xlabel('Spatial lag [km]', fontsize=labelsSize)
autocorrAx.set_ylabel('Spatial lag [km]', fontsize=labelsSize)
#titleStr = str(timeLocal) + ', 2D autocorrelation function (ifft(spectrum))'
titleStr = '2D autocorrelation function'
autocorrAx.set_title(titleStr, fontsize=titlesSize)
#++++++++++++ Draw 1D power spectrum
if (an == '1d') | (an == '1dnoise') | (an == '1d+2d+autocorr'):
if an == '1d+2d+autocorr':
psAx = plt.subplot(224)
else:
psAx = plt.subplot(122)
freqLimBeta1 = np.array([resKm/float(largeScalesLims[0]),resKm/float(largeScalesLims[1])])
psdLimBeta1 = intercept_beta1+beta1*10*np.log10(freqLimBeta1)
plt.plot(10*np.log10(freqLimBeta1), psdLimBeta1,'b--')
freqLimBeta2 = np.array([resKm/float(smallScalesLims[0]),resKm/float(smallScalesLims[1])])
psdLimBeta2 = intercept_beta2+beta2*10*np.log10(freqLimBeta2)
plt.plot(10*np.log10(freqLimBeta2), psdLimBeta2,'r--')
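# The two dashed lines are the fitted spectral slopes: in this log-log representation a
# power law plots as the straight line 10*log10(P) = intercept + beta*10*log10(f),
# evaluated at the limits of the large-scale (beta1, blue) and small-scale (beta2, red)
# fitting ranges.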
# Draw turning point
plt.vlines(x=10*np.log10(1.0/scalingBreak_best), ymin=psdLimBeta2[0]-5, ymax = psdLimBeta2[0]+5, linewidth=0.5, color='grey')
# Write betas and correlations
startX = 0.67
startY = 0.95
offsetY = 0.04
if weightedOLS == 0:
txt = "Ordinary least squares"
elif weightedOLS == 1:
txt = "Weighted ordinary least squares"
# psAx.text(startX,startY, txt, color='k', transform=psAx.transAxes)
txt = r'$\beta_1$ = ' + (fmt2 % beta1) + ", r = " + (fmt3 % r_beta1)
psAx.text(startX,startY-offsetY, txt, color='b', transform=psAx.transAxes)
txt = r'$\beta_2$ = ' + (fmt2 % beta2) + ", r = " + (fmt3 % r_beta2)
psAx.text(startX,startY-2*offsetY, txt, color='r', transform=psAx.transAxes)
txt = 'WAR = ' + (fmt1 % r.war) + ' %'
psAx.text(startX,startY-3*offsetY, txt, transform=psAx.transAxes)
txt = 'MM = ' + (fmt3 %raincondmean) + ' mm/hr'
psAx.text(startX,startY-4*offsetY, txt, transform=psAx.transAxes)
# if (args.minR < 0.01):
# txt = 'Rmin = ' + (fmt3 % args.minR) + ' mm/hr'
# else:
# txt = 'Rmin = ' + (fmt2 % args.minR) + ' mm/hr'
# psAx.text(startX,startY-5*offsetY, txt, transform=psAx.transAxes)
# txt = 'Scaling break = ' + str(scalingBreak_best) + ' km'
# psAx.text(startX,startY-6*offsetY, txt, transform=psAx.transAxes)
# txt = 'Zeros = ' + (fmt1 % zerosDBZ) + ' dBZ - ' + (fmt2 % args.minR) + ' mm/hr'
# psAx.text(startX,startY-7*offsetY, txt, transform=psAx.transAxes, fontsize=10)
if an == '1dnoise':
# Draw 1d noise spectrum
plt.plot(10*np.log10(freq),10*np.log10(psd1dnoise),'k')
else:
# Draw Power spectrum
#print(10*np.log10(freq))
plt.plot(10*np.log10(freq),10*np.log10(psd1d),'k')
titleStr = 'Radially averaged power spectrum'
plt.title(titleStr, fontsize=titlesSize)
plt.xlabel("Wavelength [km]", fontsize=15)
plt.ylabel(unitsSpectrum, fontsize= 15)
if fourierVar == 'rainrate':
plt.ylim([-50.0,40.0])
if fourierVar == 'dbz':
plt.ylim([-20.0,70.0])
# Create ticks in km
ticksList = []
tickLocal = minFieldSize
for i in range(0,20):
ticksList.append(tickLocal)
tickLocal = tickLocal/2
if tickLocal < resKm:
break
ticks = np.array(ticksList, dtype=int)
ticks_loc = 10.0*np.log10(1.0/ticks)
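# The spectrum is plotted against 10*log10(frequency), so a wavelength tick of w km
# is placed at 10*log10(1/w).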
psAx.set_xticks(ticks_loc)
psAx.set_xticklabels(ticks)
# if (an == '1d+2d+autocorr'):
# psAx.set_aspect('equal')
#plt.gcf().subplots_adjust(bottom=0.15, left=0.20)
if (an == '1d+2d+autocorr'):
plt.subplots_adjust(hspace=0.2, wspace=0.35)
else:
fig.tight_layout()
########### SAVE AND COPY PLOTS
# Save plot in scratch
analysisType = an + 'PS'
stringFigName, inDir,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal,\
product, timeAccumMin=timeAccumMin, quality=0, minR=args.minR, wols=weightedOLS, format='png')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.savefig(stringFigName,dpi=300)
print(stringFigName, ' saved.')
# Copy plot to /store
stringFigNameOut, outDir,_ = io.get_filename_stats(outBaseDir, analysisType, timeLocal, product, timeAccumMin=timeAccumMin, \
quality=0, minR=args.minR, wols=weightedOLS, format='png')
cmd = 'mkdir -p ' + outDir
os.system(cmd)
shutil.copy(stringFigName, stringFigNameOut)
print('Copied: ', stringFigName, ' to ', stringFigNameOut)
else:
nrValidFields = 0 # Reset to 0 the number of valid fields with consecutive rainfall
print('Not enough rain to compute statistics')
############ WRITE OUT DAILY STATS ###########################
print('------------------')
print('Nr valid samples during day: ', len(dailyStats))
minNrDailySamples = 2
try:
conditionForWriting = (len(dailyStats) >= minNrDailySamples) and ((hourminStr == '0000') or (timeLocal == timeEnd))
except Exception:
print(dir(r))
sys.exit(1)
if conditionForWriting:
# List to numpy array
dailyStats = np.array(dailyStats)
# Write stats in the directory of previous day if last time stamp (midnight of next day)
timePreviousDay = timeLocal - datetime.timedelta(days = 1)
# Generate filenames
analysisType = 'STATS'
if hourminStr == '0000':
fileNameStats,_,_ = io.get_filename_stats(inBaseDir, analysisType, timePreviousDay, product, timeAccumMin=timeAccumMin,\
quality=0, minR=args.minR, wols=weightedOLS, variableBreak = variableBreak, format=args.format)
else:
fileNameStats,_,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal, product, timeAccumMin=timeAccumMin,\
quality=0, minR=args.minR, wols=weightedOLS, variableBreak = variableBreak, format=args.format)
# Write out files
spectralSlopeLims = [largeScalesLims_best[0], largeScalesLims_best[1], smallScalesLims_best[1]]
if (boolPlotting == False):
if args.format == 'csv':
# Write out CSV file
io.write_csv_globalstats(fileNameStats, headers, dailyStats)
elif args.format == 'netcdf':
# Write out NETCDF file
io.write_netcdf_globalstats(fileNameStats, headers, dailyStats, str(args.minR), str(weightedOLS), spectralSlopeLims)
print(fileNameStats, ' saved.')
#### Print out some average daily stats
eulerian_corr_vector = np.array(dt.get_column_list(dailyStats,22)).astype(float)
lagrangian_corr_vector = np.array(dt.get_column_list(dailyStats,23)).astype(float)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
print('Daily average Eulerian correlation =',np.nanmean(eulerian_corr_vector))
print('Daily average Lagrangian correlation =',np.nanmean(lagrangian_corr_vector))
print('Daily difference Eul-Lagr correlation =',100*(np.nanmean(lagrangian_corr_vector) - np.nanmean(eulerian_corr_vector)),'%')
#### Write out wavelet decomposed rainfall arrays
if 'wavelets' in analysis:
# Write out wavelet coefficients to netCDF file
analysisType = 'WAVELET'
if hourminStr == '0000':
fileNameWavelet,_,_ = io.get_filename_wavelets(inBaseDir, analysisType, timePreviousDay, product, \
timeAccumMin=timeAccumMin, scaleKM=scaleKm, format='netcdf')
else:
fileNameWavelet,_,_ = io.get_filename_wavelets(inBaseDir, analysisType, timeLocal, product, \
timeAccumMin=timeAccumMin, scaleKM=scaleKm, format='netcdf')
#timePreviousDayStr = ti.datetime2timestring(timePreviousDay)
# Write out netCDF file
io.write_netcdf_waveletscale(fileNameWavelet, dailyTimesWavelets, \
xvecs[scale2keep], yvecs[scale2keep], dailyWavelets, scaleKm, waveletType = wavelet)
print('Saved:', fileNameWavelet)
# Copy wavelet netCDFs to /store
outFileNameWavelet,outDir,_ = io.get_filename_wavelets(outBaseDir, analysisType, timePreviousDay, product, \
timeAccumMin=timeAccumMin, scaleKM=scaleKm, format='netcdf')
cmd = 'mkdir -p ' + outDir
os.system(cmd)
shutil.copy(fileNameWavelet, outFileNameWavelet)
print('Copied: ', fileNameWavelet, ' to ', outFileNameWavelet)
#### Reset dailyStats array
dailyStats = []
dailyWavelets = []
dailyTimesWavelets = []
############ WRITE OUT DAILY VELOCITY FIELDS ###########################
if conditionForWriting and ('of' in analysis):
analysisType = 'VELOCITY'
fileNameFlow,_,_ = io.get_filename_stats(inBaseDir, analysisType, timeLocal, product, \
timeAccumMin=timeAccumMin, quality=0, format='netcdf')
xvec = Xmin + colgrid*1000
yvec = Ymax - rowgrid*1000 # turn Y vector to start from highest value on top
io.write_netcdf_flow(fileNameFlow, dailyTimesUV, xvec, yvec, dailyU, dailyV)
print(fileNameFlow, 'saved.')
#### Reset daily U,V arrays
dailyU = []
dailyV = []
dailyTimesUV = []
####### UPDATE TIME STAMPS
# Add 5 minutes (or one hour if working with longer accumulations)
timeLocal = timeLocal + datetime.timedelta(minutes = timeSampMin)
tocOneImg = time.clock()
#print('Elapsed time: ', tocOneImg - ticOneImg)
toc = time.clock()
print('Total archive elapsed time: ', toc-tic, ' seconds.')
| gpl-3.0 |
aabadie/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
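# staged_predict yields the ensemble prediction after each boosting iteration, so
# evaluating zero_one_loss at every stage traces the error as a function of the
# number of estimators.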
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
maxlikely/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 4 | 2586 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD Style.
import numpy as np
import pylab as pl
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
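# C is swept over 10, 100 and 1000 (10.**arange(1, 4)); larger C means weaker regularization.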
for i, C in enumerate(10. ** np.arange(1, 4)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%d" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = pl.subplot(3, 2, 2 * i + 1)
l2_plot = pl.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
pl.text(-8, 3, "C = %d" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
pl.show()
| bsd-3-clause |
fabianp/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
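# 1 + log(count) mimics the sublinear term-frequency weighting used in tf-idf,
# damping large counts while keeping the nonzero entries positive.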
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
rajeevsingh717/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chose to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
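# 1.349 is the inter-quartile range of the standard normal (2 * 0.6745),
# so iqr / 1.349 is a robust estimate of sigma.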
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
jasonmccampbell/scipy-refactor | scipy/interpolate/ndgriddata.py | 1 | 6057 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
import numpy as np
from interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Parameters
----------
points : ndarray of floats, shape (npoints, ndims)
Data point coordinates.
values : ndarray of float or complex, shape (npoints, ...)
Data values.
Notes
-----
Uses ``scipy.spatial.cKDTree``
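Examples
--------
A minimal illustrative example (the exact array repr may vary between
versions):
>>> points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
>>> values = np.array([1.0, 2.0, 3.0])
>>> interp = NearestNDInterpolator(points, values)
>>> interp(np.array([0.1, 0.9]), np.array([0.1, 0.1]))
array([ 1.,  2.])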
"""
def __init__(self, x, y):
x = _ndim_coords_from_arrays(x)
self._check_init_shape(x, y)
self.tree = cKDTree(x)
self.points = x
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args)
xi = self._check_call_shape(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan):
"""
Interpolate unstructured N-dimensional data.
.. versionadded:: 0.9
Parameters
----------
points : ndarray of floats, shape (npoints, ndims)
Data point coordinates. Can either be a ndarray of
size (npoints, ndim), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (npoints, ...)
Data values.
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
method : {'linear', 'nearest', 'cubic'}
Method of interpolation. One of
- ``nearest``: return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
- ``linear``: tessellate the input point set into n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
- ``cubic`` (1-D): return the value determined from a cubic
spline.
- ``cubic`` (2-D): return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
...     return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
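# 1-D inputs are special-cased: interp1d already provides nearest, linear and
# cubic interpolation along a single axis.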
from interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/types/generic.py | 8 | 2844 | """ define generic base classes for pandas objects """
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, '_typ') in comp
dct = dict(__instancecheck__=_check, __subclasscheck__=_check)
meta = type("ABCBase", (type, ), dct)
return meta(name, tuple(), dct)
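# Illustrative note: the generated classes make ``isinstance(obj, ABCDataFrame)``
# return True for any object whose ``_typ`` attribute is "dataframe", so type
# checks work without importing the concrete pandas classes (and thus without
# circular imports).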
ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", ))
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ",
("int64index", ))
ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ",
("rangeindex", ))
ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ",
("float64index", ))
ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ",
("multiindex", ))
ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ",
("datetimeindex", ))
ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ",
("timedeltaindex", ))
ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ",
("periodindex", ))
ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ",
("categoricalindex", ))
ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ",
("index", "int64index", "rangeindex",
"float64index",
"multiindex", "datetimeindex",
"timedeltaindex", "periodindex",
"categoricalindex"))
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", ))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", ))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel", "panel4d"))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ",
("categorical"))
ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
| mit |
ContinuumIO/numpy | numpy/lib/recfunctions.py | 148 | 35012 | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
from __future__ import division, absolute_import, print_function
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
if sys.version_info[0] < 3:
from future_builtins import zip
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'drop_fields', 'find_duplicates',
'get_fieldstructure', 'join_by', 'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields', 'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, basestring)):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
flatten : {True, False}, optional
Whether to collapse the fields of nested structures into a flat sequence.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in zip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
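Examples
--------
A small illustrative example (the exact repr may differ between numpy
versions):
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = rfn.append_fields(a, 'C', data=[100, 200], usemask=False)
>>> b['C']
array([100, 200])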
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
Superposes arrays field by field.
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" %
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
The key should be either a string or a sequence of string corresponding
to the fields used to join the array. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
If 'outer', returns the common elements as well as the elements of
r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
in r2 but absent of the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
in r1 but absent of the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
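Examples
--------
A minimal illustrative example (the exact repr may differ between numpy
versions):
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
...              dtype=[('key', int), ('x', float)])
>>> b = np.array([(1, 100.), (3, 300.), (4, 400.)],
...              dtype=[('key', int), ('y', float)])
>>> j = rfn.join_by('key', a, b, jointype='inner', usemask=False)
>>> j['key']
array([1, 3])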
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names), set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.append(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
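# Illustrative usage sketch (not part of the original module): joining two
# small structured arrays on a shared key field. The array contents and field
# names below are made up for the example.
#
#   >>> import numpy as np
#   >>> from numpy.lib import recfunctions as rfn
#   >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)],
#   ...              dtype=[('key', int), ('value1', float)])
#   >>> b = np.array([(2, 200.), (3, 300.), (4, 400.)],
#   ...              dtype=[('key', int), ('value2', float)])
#   >>> joined = rfn.join_by('key', a, b, jointype='inner', usemask=False)
#   >>> joined['key']
#   array([2, 3])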
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| bsd-3-clause |
macks22/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
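# Illustrative sketch (not part of the original file): for a 2 x 2 grid
# (n_x=2, n_y=2, n_z=1) the voxels are numbered
#
#     0 1
#     2 3
#
# and the function returns the four edges (0, 1), (2, 3), (0, 2) and (1, 3)
# as the columns of a 2 x 4 array:
#
#   >>> _make_edges_3d(2, 2)
#   array([[0, 2, 0, 1],
#          [1, 3, 2, 3]])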
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The dtype of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
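# Illustrative usage (not part of the original file): the gradient graph of a
# tiny 2 x 2 image is a 4 x 4 adjacency matrix whose off-diagonal entries are
# the absolute intensity differences between neighbouring pixels and whose
# diagonal holds the pixel values themselves.
#
#   >>> import numpy as np
#   >>> from sklearn.feature_extraction.image import img_to_graph
#   >>> img = np.array([[0., 1.], [2., 3.]])
#   >>> graph = img_to_graph(img)
#   >>> graph.shape
#   (4, 4)
#   # graph.toarray().diagonal() recovers the pixel values [0., 1., 2., 3.]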
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The dtype of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
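# Illustrative usage (not part of the original file): the connectivity graph
# of a 2 x 2 grid has 4 nodes; the sparse adjacency matrix stores one entry
# per node on the diagonal plus two symmetric entries per neighbouring pair
# (4 pairs here), i.e. 4 + 2 * 4 = 12 stored values.
#
#   >>> from sklearn.feature_extraction.image import grid_to_graph
#   >>> A = grid_to_graph(2, 2)
#   >>> A.shape
#   (4, 4)
#   >>> A.nnz
#   12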
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
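# Illustrative sketch (not part of the original file): a 4 x 4 image admits
# (4 - 2 + 1) * (4 - 2 + 1) = 9 distinct 2 x 2 patches, and a fractional
# max_patches is interpreted as a proportion of that total.
#
#   >>> _compute_n_patches(4, 4, 2, 2)
#   9
#   >>> _compute_n_patches(4, 4, 2, 2, max_patches=0.5)
#   4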
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
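# Illustrative sketch (not part of the original file): extracting 2 x 2 patches
# from a 4 x 4 array with the default extraction step of 1 yields a strided
# view whose first two axes index the patch position and whose last two axes
# hold the patch content.
#
#   >>> import numpy as np
#   >>> patches = extract_patches(np.arange(16).reshape(4, 4), patch_shape=2)
#   >>> patches.shape
#   (3, 3, 2, 2)
#   >>> patches[0, 0]
#   array([[0, 1],
#          [4, 5]])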
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
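# Illustrative round trip (not part of the original file): averaging the
# overlapping patches produced by extract_patches_2d reproduces the original
# image exactly (up to floating point), because every patch is a verbatim
# copy of the corresponding image region.
#
#   >>> import numpy as np
#   >>> from sklearn.feature_extraction import image
#   >>> one_image = np.arange(16, dtype=float).reshape((4, 4))
#   >>> patches = image.extract_patches_2d(one_image, (2, 2))
#   >>> rebuilt = image.reconstruct_from_patches_2d(patches, (4, 4))
#   >>> bool(np.allclose(rebuilt, one_image))
#   True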
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
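# Illustrative usage (not part of the original file): extracting all 2 x 2
# patches from a batch of two 4 x 4 images. Each image yields 9 patches, so
# the transformer returns 18 patches in total.
#
#   >>> import numpy as np
#   >>> from sklearn.feature_extraction.image import PatchExtractor
#   >>> X = np.arange(2 * 4 * 4).reshape(2, 4, 4)
#   >>> PatchExtractor(patch_size=(2, 2)).transform(X).shape
#   (18, 2, 2)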
| bsd-3-clause |
stephenliu1989/HK_DataMiner | hkdataminer/template_matching/TSTM.py | 1 | 1745 | import argparse
import numpy as np
from sklearn.decomposition import PCA
import sh
import mrcfile
from Select_angle import cal_var_byPCA
from gen_2D_image import gen_image
from two_stage_matching import matching_algorithm
# training parameters
parser = argparse.ArgumentParser()
parser.add_argument('--vol_size', type=int, default=128, help='3D conformation size is vol_size*vol_size*vol_size')
parser.add_argument('--n_components', type=int, default=3, help='pca components')
parser.add_argument('--conf_size', type=float, default=99, help='3D conformations number: conf_size')
parser.add_argument('--datatype', type=str, default='sim', help='dataset type: simulation or real')
parser.add_argument('--path1', type=str, default='./select_angle', help='path of select angle')
parser.add_argument('--path2', type=str, default='./data', help='path of data')
parser.add_argument('--path3', type=str, default='./gen_2D_image/', help='path of gen 2d image')
parser.add_argument('--path4', type=str, default='./two_stage_matching/', help='path of matching stage')
opt = parser.parse_args()
def main():
###Select angle
select_angle = cal_var_byPCA.Select_anlge_step(n_components=opt.n_components, vol_size=opt.vol_size, conf_size=opt.conf_size, outputname=opt.path1)
input_data = select_angle.load_3Dmrc(opt.path2 + '/test_data_' + opt.datatype)  # load 3D mrcs
pc_eigenvalue = select_angle.PCA(input_data)
select_angle.project(pc_eigenvalue)
select_angle.cal_contribution_from_each_pc_original_weighted()
select_angle.find_best_anlge()
###gen_2D_images
gen_image.gen_image(opt.datatype, opt.path3)
### two_stage_matching
matching_algorithm.two_stage_matching(opt.datatype, opt.path4)
if __name__ == '__main__':
main()
| apache-2.0 |
helshowk/nnpy | test_neural.py | 1 | 2010 | #!/usr/bin/env python2
import numpy
import model, neural, cost, layer, activation
import matplotlib.pyplot as plt
if __name__ == "__main__":
trainData = dict()
trainData['x'] = numpy.random.rand(50000,1)*3*numpy.pi
trainData['t'] = numpy.sin(trainData['x'])
#trainData['t'] = numpy.zeros((trainData['x'].shape[0],2))
#for idx,x in enumerate(trainData['x']):
#trainData['t'][idx][0] = numpy.sin(x)
#trainData['t'][idx][1] = 0.5*numpy.sin(x) + 2
avg = numpy.average(trainData['x'])
std = numpy.std(trainData['x'])
network = neural.NN(cost.quadratic, cost.dquadratic)
#network.l2_coefficient = 0.0001
network.learn_rate = 0.005
network.momentum = 0.5
network.addLayer(layer.Layer(1, 100, activation.rectified, activation.d_rectified))
#network.addLayer(layer.Layer(100, 100, activation.rectified, activation.d_rectified))
network.addLayer(layer.Layer(100, 1, activation.identity, activation.one))
#network.dropout_p = 0.5
testData = dict()
testData['x'] = numpy.random.rand(1000,1)*3*numpy.pi
testData['t'] = numpy.sin(testData['x'])
#testData['t'] = numpy.zeros((testData['x'].shape[0],2))
#for idx,x in enumerate(testData['x']):
#testData['t'][idx][0] = numpy.sin(x)
#testData['t'][idx][1] = 0.5*numpy.sin(x) + 2
m = model.Model(network, notes='Sin()')
m.runModel(10, trainData, testData, updateType='', early_stopping=-1, validation=0, batchSize=100, screen_debug=True, normalize=False)
xr = numpy.random.rand(200,1)*3*numpy.pi
y1 = list()
#y2 = list()
##for i in trainData['x']:
##y1.append(network.forward(i)[0])
##y2.append(network.forward(i)[1])
for i in xr:
result = network.forward(i)[0]
y1.append(result[0])
#y2.append(result[1])
plt.plot(trainData['x'],trainData['t'], 'ro')
##plt.plot(xr,ans, 'ro')
plt.plot(xr,y1, 'bo')
#plt.plot(xr,y2, 'go')
plt.show()
| gpl-2.0 |
Akshay0724/scikit-learn | examples/svm/plot_svm_scale_c.py | 60 | 5411 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size,
n_splits=250, random_state=1))
grid.fit(X, y)
scores = grid.cv_results_['mean_test_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
quantopian/zipline | zipline/assets/synthetic.py | 1 | 11183 | from itertools import product
from string import ascii_uppercase
import pandas as pd
from pandas.tseries.offsets import MonthBegin
from .futures import CMES_CODE_TO_MONTH
def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime,
exchange='TEST'):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
Create a new asset every `frequency` * `periods_between_starts` periods.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': exchange,
},
index=range(num_assets),
)
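# Illustrative usage sketch (not part of the original module): four assets
# that start two periods apart and each live for five periods, using a plain
# one-day Timedelta as the frequency for simplicity.
#
#   >>> import pandas as pd
#   >>> frame = make_rotating_equity_info(
#   ...     num_assets=4,
#   ...     first_start=pd.Timestamp('2015-01-01'),
#   ...     frequency=pd.Timedelta(days=1),
#   ...     periods_between_starts=2,
#   ...     asset_lifetime=5,
#   ... )
#   >>> frame['symbol'].tolist()
#   ['A', 'B', 'C', 'D']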
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp, optional
end_date : pd.Timestamp, optional
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
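# Illustrative usage sketch (not part of the original module): three assets
# sharing the same start and end dates; symbols default to 'A', 'B', 'C' and
# names to 'A INC.', 'B INC.', 'C INC.'.
#
#   >>> import pandas as pd
#   >>> frame = make_simple_equity_info(
#   ...     sids=[1, 2, 3],
#   ...     start_date=pd.Timestamp('2014-01-01'),
#   ...     end_date=pd.Timestamp('2015-01-01'),
#   ... )
#   >>> frame['symbol'].tolist()
#   ['A', 'B', 'C']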
def make_simple_multi_country_equity_info(countries_to_sids,
countries_to_exchanges,
start_date,
end_date):
"""Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`, from multiple countries.
"""
sids = []
symbols = []
exchanges = []
for country, country_sids in countries_to_sids.items():
exchange = countries_to_exchanges[country]
for i, sid in enumerate(country_sids):
sids.append(sid)
symbols.append('-'.join([country, str(i)]))
exchanges.append(exchange)
return pd.DataFrame(
{
'symbol': symbols,
'start_date': start_date,
'end_date': end_date,
'asset_name': symbols,
'exchange': exchanges,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
Starting after the first end date, end each asset every
`frequency` * `periods_between_ends`.
auto_close_delta : pd.Timedelta or None
If not None, each asset's `auto_close_date` is set to its `end_date`
plus this delta; if None, the `auto_close_date` column is not added.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame
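# Illustrative usage sketch (not part of the original module): three assets
# that start together and end one period apart, each with an auto_close_date
# two days after its end_date.
#
#   >>> import pandas as pd
#   >>> frame = make_jagged_equity_info(
#   ...     num_assets=3,
#   ...     start_date=pd.Timestamp('2015-01-01'),
#   ...     first_end=pd.Timestamp('2015-01-05'),
#   ...     frequency=pd.Timedelta(days=1),
#   ...     periods_between_ends=1,
#   ...     auto_close_delta=pd.Timedelta(days=2),
#   ... )
#   >>> sorted(frame.columns)
#   ['auto_close_date', 'end_date', 'exchange', 'start_date', 'symbol']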
def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None,
multiplier=500):
"""
Create a DataFrame representing futures for `root_symbols` during `year`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
start_date_func : (Timestamp) -> Timestamp, optional
Function to generate start dates from first of the month associated
with each asset month code. Defaults to a start_date one year prior
to the month_code date.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CMES_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01) sorted by year/month
# `MonthBegin(month_num - 1)` since the year already starts at month 1.
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num - 1))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
sorted(list(month_codes.items()), key=lambda item: item[1]),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
'expiration_date': expiration_date_func(month_begin),
'multiplier': multiplier,
'exchange': "TEST",
})
return pd.DataFrame.from_records(contracts, index='sid')
def make_commodity_future_info(first_sid,
root_symbols,
years,
month_codes=None,
multiplier=500):
"""
Make futures testing data that simulates the notice/expiration date
behavior of physical commodities like oil.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CMES_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Expiration dates are on the 20th of the month prior to the month code.
Notice dates are on the 20th two months prior to the month code.
Start dates are one year before the contract month.
See Also
--------
make_future_info
"""
nineteen_days = pd.Timedelta(days=19)
one_year = pd.Timedelta(days=365)
return make_future_info(
first_sid=first_sid,
root_symbols=root_symbols,
years=years,
notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days,
expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days,
start_date_func=lambda dt: dt - one_year,
month_codes=month_codes,
multiplier=multiplier,
)
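# Illustrative usage sketch (not part of the original module): contracts for a
# single hypothetical root symbol over one year. With the default CMES month
# codes (one per calendar month) this produces 12 contracts whose symbols
# combine the root symbol, the month code and the two-digit year, e.g. 'CLF14'
# for the January 2014 contract.
#
#   >>> futures = make_commodity_future_info(
#   ...     first_sid=1,
#   ...     root_symbols=['CL'],
#   ...     years=[2014],
#   ... )
#   >>> len(futures)
#   12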
| apache-2.0 |
jakobj/nest-simulator | pynest/nest/tests/test_get_set.py | 5 | 21303 | # -*- coding: utf-8 -*-
#
# test_get_set.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NodeCollection get/set tests
"""
import unittest
import nest
import json
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
try:
import pandas
import pandas.util.testing as pt
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@nest.ll_api.check_stack
class TestNodeCollectionGetSet(unittest.TestCase):
"""NodeCollection get/set tests"""
def setUp(self):
nest.ResetKernel()
def test_get(self):
"""
Test that get function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
C_m = nodes.get('C_m')
node_ids = nodes.get('global_id')
E_L = nodes.get('E_L')
V_m = nodes.get('V_m')
t_ref = nodes.get('t_ref')
g = nodes.get(['local', 'thread', 'vp'])
local = g['local']
thread = g['thread']
vp = g['vp']
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(node_ids, tuple(range(1, 11)))
self.assertEqual(E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertTrue(local)
self.assertEqual(thread, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(vp, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
g_reference = {'local': (True, True, True, True, True,
True, True, True, True, True),
'thread': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'vp': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)}
self.assertEqual(g, g_reference)
def test_get_sliced(self):
"""
Test that get works on sliced NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
V_m = nodes[2:5].get('V_m')
g = nodes[5:7].get(['t_ref', 'tau_m'])
C_m = nodes[2:9:2].get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -70.0))
self.assertEqual(g['t_ref'], (2.0, 2.0))
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0))
def test_get_composite(self):
"""
Test that get function works on composite NodeCollections
"""
n1 = nest.Create('iaf_psc_alpha', 2)
n2 = nest.Create('iaf_psc_delta', 2)
n3 = nest.Create('iaf_psc_exp')
n4 = nest.Create('iaf_psc_alpha', 3)
n1.set(V_m=[-77., -88.])
n3.set({'V_m': -55.})
n1.set(C_m=[251., 252.])
n2.set(C_m=[253., 254.])
n3.set({'C_m': 255.})
n4.set(C_m=[256., 257., 258.])
n5 = n1 + n2 + n3 + n4
status_dict = n5.get()
# Check that we get values in correct order
vm_ref = (-77., -88., -70., -70., -55, -70., -70., -70.)
self.assertEqual(status_dict['V_m'], vm_ref)
# Check that we get None where not applicable
# tau_syn_ex is part of iaf_psc_alpha
tau_ref = (2., 2., None, None, 2., 2., 2., 2.)
self.assertEqual(status_dict['tau_syn_ex'], tau_ref)
# refractory_input is part of iaf_psc_delta
refrac_ref = (None, None,
False, False,
None, None,
None, None)
self.assertEqual(status_dict['refractory_input'], refrac_ref)
# Check that calling get with string works on composite NCs, both on
# parameters all the models have, and on individual parameters.
Cm_ref = [x * 1. for x in range(251, 259)]
Cm = n5.get('C_m')
self.assertEqual(list(Cm), Cm_ref)
refrac = n5.get('refractory_input')
self.assertEqual(refrac, refrac_ref)
@unittest.skipIf(not HAVE_NUMPY, 'NumPy package is not available')
def test_get_different_size(self):
"""
Test get with different input for different sizes of NodeCollections
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
empty_array_float = np.array([], dtype=np.float64)
empty_array_int = np.array([], dtype=np.int64)
# Single node, literal parameter
self.assertEqual(single_sr.get('start'), 0.0)
# Single node, array parameter
self.assertEqual(single_sr.get(['start', 'time_in_steps']),
{'start': 0.0, 'time_in_steps': False})
# Single node, hierarchical with literal parameter
np.testing.assert_array_equal(single_sr.get('events', 'times'),
empty_array_float)
# Multiple nodes, hierarchical with literal parameter
values = multi_sr.get('events', 'times')
for v in values:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, hierarchical with array parameter
values = single_sr.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
np.testing.assert_array_equal(values['senders'], empty_array_int)
np.testing.assert_array_equal(values['times'], empty_array_float)
# Multiple nodes, hierarchical with array parameter
values = multi_sr.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
self.assertEqual(len(values['senders']), len(multi_sr))
for v in values['senders']:
np.testing.assert_array_equal(v, empty_array_int)
for v in values['times']:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, no parameter (gets all values)
values = single_sr.get()
num_values_single_sr = len(values.keys())
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sr.get()
self.assertEqual(len(values.keys()), num_values_single_sr)
self.assertEqual(values['start'],
tuple(0.0 for i in range(len(multi_sr))))
@unittest.skipIf(not HAVE_PANDAS, 'Pandas package is not available')
def test_get_pandas(self):
"""
Test that get function with Pandas output works as expected.
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
empty_array_float = np.array([], dtype=np.float64)
# Single node, literal parameter
pt.assert_frame_equal(single_sr.get('start', output='pandas'),
pandas.DataFrame({'start': [0.0]},
index=tuple(single_sr.tolist())))
# Multiple nodes, literal parameter
pt.assert_frame_equal(multi_sr.get('start', output='pandas'),
pandas.DataFrame(
{'start': [0.0 for i in range(
len(multi_sr))]},
index=tuple(multi_sr.tolist())))
# Single node, array parameter
pt.assert_frame_equal(single_sr.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame({'start': [0.0],
'n_events': [0]},
index=tuple(single_sr.tolist())))
# Multiple nodes, array parameter
ref_dict = {'start': [0.0 for i in range(len(multi_sr))],
'n_events': [0]}
pt.assert_frame_equal(multi_sr.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sr.tolist())))
# Single node, hierarchical with literal parameter
pt.assert_frame_equal(single_sr.get('events', 'times',
output='pandas'),
pandas.DataFrame({'times': [[]]},
index=tuple(single_sr.tolist())))
# Multiple nodes, hierarchical with literal parameter
ref_dict = {'times': [empty_array_float
for i in range(len(multi_sr))]}
pt.assert_frame_equal(multi_sr.get('events', 'times',
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sr.tolist())))
# Single node, hierarchical with array parameter
ref_df = pandas.DataFrame(
{'times': [[]], 'senders': [[]]}, index=tuple(single_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sr.get(
'events', ['senders', 'times'], output='pandas'),
ref_df)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': [[] for i in range(len(multi_sr))],
'senders': [[] for i in range(len(multi_sr))]}
ref_df = pandas.DataFrame(
ref_dict,
index=tuple(multi_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
sr_df = multi_sr.get('events', ['senders', 'times'], output='pandas')
sr_df = sr_df.reindex(sorted(sr_df.columns), axis=1)
pt.assert_frame_equal(sr_df,
ref_df)
# Single node, no parameter (gets all values)
values = single_sr.get(output='pandas')
num_values_single_sr = values.shape[1]
self.assertEqual(values['start'][tuple(single_sr.tolist())[0]], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sr.get(output='pandas')
self.assertEqual(values.shape, (len(multi_sr), num_values_single_sr))
pt.assert_series_equal(values['start'],
pandas.Series({key: 0.0
for key in tuple(multi_sr.tolist())},
dtype=np.float64,
name='start'))
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sr)
nest.Connect(nodes, multi_sr, 'one_to_one')
nest.Simulate(39)
ref_dict = {'times': [[31.8, 36.1, 38.5]],
'senders': [[17, 12, 20]]}
ref_df = pandas.DataFrame(ref_dict, index=tuple(single_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sr.get('events', ['senders', 'times'],
output='pandas'),
ref_df)
ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
[]],
'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
ref_df = pandas.DataFrame(ref_dict, index=tuple(multi_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(multi_sr.get('events', ['senders', 'times'],
output='pandas'),
ref_df)
def test_get_JSON(self):
"""
Test that get function with json output works as expected.
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
# Single node, literal parameter
self.assertEqual(json.loads(
single_sr.get('start', output='json')), 0.0)
# Multiple nodes, literal parameter
self.assertEqual(
json.loads(multi_sr.get('start', output='json')),
len(multi_sr) * [0.0])
# Single node, array parameter
ref_dict = {'start': 0.0, 'n_events': 0}
self.assertEqual(
json.loads(single_sr.get(['start', 'n_events'], output='json')),
ref_dict)
# Multiple nodes, array parameter
ref_dict = {'start': len(multi_sr) * [0.0],
'n_events': len(multi_sr) * [0]}
self.assertEqual(
json.loads(multi_sr.get(['start', 'n_events'], output='json')),
ref_dict)
# Single node, hierarchical with literal parameter
self.assertEqual(json.loads(single_sr.get(
'events', 'times', output='json')), [])
# Multiple nodes, hierarchical with literal parameter
ref_list = len(multi_sr) * [[]]
self.assertEqual(
json.loads(multi_sr.get('events', 'times', output='json')),
ref_list)
# Single node, hierarchical with array parameter
ref_dict = {'senders': [], 'times': []}
self.assertEqual(
json.loads(single_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': len(multi_sr) * [[]],
'senders': len(multi_sr) * [[]]}
self.assertEqual(
json.loads(multi_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Single node, no parameter (gets all values)
values = json.loads(single_sr.get(output='json'))
num_values_single_sr = len(values)
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = json.loads(multi_sr.get(output='json'))
self.assertEqual(len(values), num_values_single_sr)
self.assertEqual(values['start'], len(multi_sr) * [0.0])
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sr)
nest.Connect(nodes, multi_sr, 'one_to_one')
nest.Simulate(39)
ref_dict = {'times': [31.8, 36.1, 38.5],
'senders': [17, 12, 20]}
self.assertEqual(
json.loads(single_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
[]],
'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
self.assertEqual(
json.loads(multi_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
def test_set(self):
"""
Test that set function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
# Dict to set same value for all nodes.
nodes.set({'C_m': 100.0})
C_m = nodes.get('C_m')
self.assertEqual(C_m, (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
# Set same value for all nodes.
nodes.set(tau_Ca=500.0)
tau_Ca = nodes.get('tau_Ca')
self.assertEqual(tau_Ca, (500.0, 500.0, 500.0, 500.0, 500.0,
500.0, 500.0, 500.0, 500.0, 500.0))
# List of dicts, where each dict corresponds to a single node.
nodes.set(({'V_m': 10.0}, {'V_m': 20.0}, {'V_m': 30.0}, {'V_m': 40.0},
{'V_m': 50.0}, {'V_m': 60.0}, {'V_m': 70.0}, {'V_m': 80.0},
{'V_m': 90.0}, {'V_m': -100.0}))
V_m = nodes.get('V_m')
self.assertEqual(V_m, (10.0, 20.0, 30.0, 40.0, 50.0,
60.0, 70.0, 80.0, 90.0, -100.0))
# Set value of a parameter based on list. List must be length of nodes.
nodes.set(V_reset=[-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.])
V_reset = nodes.get('V_reset')
self.assertEqual(V_reset, (-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.))
with self.assertRaises(IndexError):
nodes.set(V_reset=[-85., -82., -80., -77., -75.])
# Set different parameters with a dictionary.
nodes.set({'t_ref': 44.0, 'tau_m': 2.0, 'tau_minus': 42.0})
g = nodes.get(['t_ref', 'tau_m', 'tau_minus'])
self.assertEqual(g['t_ref'], (44.0, 44.0, 44.0, 44.0, 44.0,
44.0, 44.0, 44.0, 44.0, 44.0))
self.assertEqual(g['tau_m'], (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertEqual(g['tau_minus'], (42.0, 42.0, 42.0, 42.0, 42.0,
42.0, 42.0, 42.0, 42.0, 42.0))
with self.assertRaises(nest.kernel.NESTError):
nodes.set({'vp': 2})
def test_set_composite(self):
"""
Test that set works on composite NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes[2:5].set(({'V_m': -50.0}, {'V_m': -40.0}, {'V_m': -30.0}))
nodes[5:7].set({'t_ref': 4.4, 'tau_m': 3.0})
nodes[2:9:2].set(C_m=111.0)
V_m = nodes.get('V_m')
g = nodes.get(['t_ref', 'tau_m'])
C_m = nodes.get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -50.0, -40.0, -30.0,
-70.0, -70.0, -70.0, -70.0, -70.0,))
self.assertEqual(g, {'t_ref': (2.0, 2.0, 2.0, 2.0, 2.0,
4.4, 4.4, 2.0, 2.0, 2.0),
'tau_m': (10.0, 10.0, 10.0, 10.0, 10.0,
3.00, 3.00, 10.0, 10.0, 10.0)})
self.assertEqual(C_m, (250.0, 250.0, 111.0, 250.0, 111.0,
250.0, 111.0, 250.0, 111.0, 250.0))
def test_get_attribute(self):
"""Test get using getattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
self.assertEqual(nodes.C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(nodes.global_id, tuple(range(1, 11)))
self.assertEqual(nodes.E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
with self.assertRaises(KeyError):
print(nodes.nonexistent_attribute)
self.assertIsNone(nodes.spatial)
spatial_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([2, 2]))
self.assertIsNotNone(spatial_nodes.spatial)
spatial_reference = {'network_size': 4,
'center': (0.0, 0.0),
'edge_wrap': False,
'extent': (1.0, 1.0),
'shape': (2, 2)}
self.assertEqual(spatial_nodes.spatial, spatial_reference)
def test_set_attribute(self):
"""Test set using setattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes.C_m = 100.0
self.assertEqual(nodes.get('C_m'), (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
v_reset_reference = (-85., -82., -80., -77., -75., -72., -70., -67., -65., -62.)
nodes.V_reset = v_reset_reference
self.assertEqual(nodes.get('V_reset'), v_reset_reference)
with self.assertRaises(IndexError):
nodes.V_reset = [-85., -82., -80., -77., -75.]
with self.assertRaises(nest.kernel.NESTError):
nodes.nonexistent_attribute = 1.
def suite():
suite = unittest.makeSuite(TestNodeCollectionGetSet, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/indexes/frozen.py | 20 | 4619 | """
frozen (immutable) data structures to support MultiIndexing
These are used for:
- .names (FrozenList)
- .levels & .labels (FrozenNDArray)
"""
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.io.formats.printing import pprint_thing
class FrozenList(PandasObject, list):
"""
Container that doesn't allow setting items *but* which, unlike a plain
list, is hashable (via ``hash(tuple(self))``), so it can be used for
lookups, as a dictionary key, etc.
"""
# Sidenote: This has to be of type list, otherwise it messes up PyTables
# typechecks
def __add__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(super(FrozenList, self).__add__(other))
__iadd__ = __add__
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super(FrozenList, self).__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super(FrozenList, self).__getitem__(n))
return super(FrozenList, self).__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super(FrozenList, self).__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super(FrozenList, self).__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__.__name__)
def __unicode__(self):
return pprint_thing(self, quote_strings=True,
escape_chars=('\t', '\r', '\n'))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
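# Illustrative behaviour (not part of the original module): a FrozenList
# supports read-only list operations and hashing, while any in-place mutation
# raises TypeError through the ``_disabled`` stub.
#
#   >>> fl = FrozenList(['a', 'b'])
#   >>> isinstance(fl + ['c'], FrozenList)
#   True
#   >>> hash(fl) == hash(FrozenList(['a', 'b']))
#   True
#   >>> fl.append('c')
#   Traceback (most recent call last):
#       ...
#   TypeError: 'FrozenList' does not support mutable operations.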
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." %
self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
prepr = pprint_thing(self, escape_chars=('\t', '\r', '\n'),
quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
def searchsorted(self, v, side='left', sorter=None):
"""
Find indices where elements of v should be inserted
in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""
# we are much more performant if the searched
# indexer is the same type as the array
# this doesn't matter for int64, but DOES
# matter for smaller int dtypes
# https://github.com/numpy/numpy/issues/5370
try:
v = self.dtype.type(v)
except:
pass
return super(FrozenNDArray, self).searchsorted(
v, side=side, sorter=sorter)
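# Illustrative behaviour (not part of the original module): a FrozenNDArray
# is a read-only ndarray subclass; non-mutating operations such as
# searchsorted work as usual, while item assignment (e.g. ``arr[0] = 10``)
# raises TypeError via the ``_disabled`` stub.
#
#   >>> arr = FrozenNDArray([1, 3, 5])
#   >>> int(arr.searchsorted(4))
#   2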
def _ensure_frozen(array_like, categories, copy=False):
array_like = coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
| mit |
manashmndl/scikit-learn | sklearn/svm/classes.py | 37 | 39951 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
        Note that the synthetic feature weight is subject to l1/l2
        regularization, like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
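# Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source): fitting LinearSVC on a tiny made-up binary problem.
# The helper is not executed on import; call it manually if desired.
def _demo_linear_svc():
    import numpy as np
    X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    y = np.array([1, 1, 2, 2])
    clf = LinearSVC(C=1.0, loss='squared_hinge', random_state=0)
    clf.fit(X, y)
    # for a binary problem coef_ has shape (1, n_features)
    print(clf.coef_.shape, clf.intercept_.shape)
    print(clf.predict([[-0.8, -1.]]))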
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss while 'squared_epsilon_insensitive' is the squared
        epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
        Note that the synthetic feature weight is subject to l1/l2
        regularization, like all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
    coef_ : array, shape = [n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
    intercept_ : array, shape = [1]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
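# Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source): LinearSVR on a made-up noise-free linear target.
# The helper is not executed on import.
def _demo_linear_svr():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X.dot(np.array([1.5, -2.0, 1.0]))
    reg = LinearSVR(C=1.0, epsilon=0.0, random_state=0)
    reg.fit(X, y)
    # fit flattens coef_ to shape (n_features,)
    print(reg.coef_.shape)
    print(reg.predict(X[:3]))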
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
    to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
        LinearSVC for further points of comparison.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
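# Illustrative sketch (editorial addition, not part of the upstream
# scikit-learn source): the output shapes produced by the two
# decision_function_shape settings documented above, on made-up
# four-class toy data.  The helper is not executed on import.
def _demo_decision_function_shape():
    import numpy as np
    X = np.array([[0., 0.], [0.5, 0.2], [1., 1.], [1.2, 0.8],
                  [2., 2.], [2.2, 1.8], [3., 0.], [3.2, 0.2]])
    y = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    ovo = SVC(decision_function_shape='ovo').fit(X, y)
    ovr = SVC(decision_function_shape='ovr').fit(X, y)
    # with 4 classes: 'ovo' gives n_classes * (n_classes - 1) / 2 = 6 columns,
    # 'ovr' gives n_classes = 4 columns
    print(ovo.decision_function(X).shape)  # (8, 6)
    print(ovr.decision_function(X).shape)  # (8, 4)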
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
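# Illustrative sketch (editorial addition, not part of the upstream
# scikit-learn source): nu is a lower bound on the fraction of support
# vectors, so raising it keeps more of them.  The toy data is made up for
# the demo; the helper is not executed on import.
def _demo_nu_support_vectors():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.r_[rng.randn(50, 2) - 2, rng.randn(50, 2) + 2]
    y = np.r_[np.zeros(50), np.ones(50)]
    for nu in (0.05, 0.5):
        clf = NuSVC(nu=nu).fit(X, y)
        # n_support_ holds the per-class support vector counts
        print(nu, clf.n_support_.sum())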
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
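# Illustrative sketch (editorial addition, not part of the upstream
# scikit-learn source): a wider epsilon-tube leaves more training points
# inside the tube, so fewer of them end up as support vectors.  The sine
# data is made up for the demo; the helper is not executed on import.
def _demo_epsilon_tube():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.sort(5 * rng.rand(80, 1), axis=0)
    y = np.sin(X).ravel()
    for eps in (0.01, 0.5):
        reg = SVR(kernel='rbf', C=10., epsilon=eps).fit(X, y)
        # support_ holds the indices of the support vectors
        print(eps, reg.support_.shape[0])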
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
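# Illustrative usage sketch (editorial addition, not part of the upstream
# scikit-learn source): novelty detection with OneClassSVM on a made-up
# Gaussian blob.  The helper is not executed on import.
def _demo_one_class_svm():
    import numpy as np
    rng = np.random.RandomState(42)
    X_train = 0.3 * rng.randn(100, 2)          # inliers around the origin
    X_test = np.array([[0., 0.], [4., 4.]])    # one inlier, one clear outlier
    clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1)
    clf.fit(X_train)
    # predict returns +1 for points inside the learned region, -1 outside
    print(clf.predict(X_test))
    print(clf.decision_function(X_test).ravel())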
| bsd-3-clause |
kazemakase/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared Euclidean correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared Euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
h2educ/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/tests/test_grid_search.py | 68 | 28856 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
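# The two tests below exercise GridSearchCV's error_score behaviour: when a
# fit fails for one parameter setting, the failure is either replaced by a
# fallback score (0.0 or NaN here) together with a FitFailedWarning, or
# re-raised when error_score='raise'.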
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' re-raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter is the same as the grid size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| unlicense |
dimkal/mne-python | examples/inverse/plot_label_from_stc.py | 31 | 3963 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD (3-clause)
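# Outline of the example: (1) compute a dSPM inverse solution, (2) average it
# over the 80-120 ms window, (3) grow a functional label from the peak of the
# mean activation inside an anatomical ROI, and (4) compare PCA-flipped time
# courses extracted from the anatomical and functional labels.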
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True)
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_func))])
###############################################################################
# plot the time courses
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
henridwyer/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
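# Only 200 shuffled samples are kept, presumably to keep the brute-force
# leave-one-out comparison in _test_ridge_loo reasonably fast.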
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
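# DENSE_FILTER / SPARSE_FILTER are passed to the _test_* helpers so that each
# check runs once on dense arrays and once on CSR matrices (see
# test_dense_sparse below).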
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test Ridge on a simple toy problem
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that it works with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight='balanced' and class_weight=None should return the same
# values when y has an equal number of samples for each label
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
bcraenen/KFClassifier | other/plotters/plot_classifier_comparison.py | 66 | 4895 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
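# Outline: build three 2D toy datasets, fit every classifier on a training
# split, colour the plane with each classifier's decision function (or
# predicted probability), and write the test-set accuracy in the corner of
# each panel.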
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
"Quadratic Discriminant Analysis"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LinearDiscriminantAnalysis(),
QuadraticDiscriminantAnalysis()]
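# `names` and `classifiers` are paired positionally via zip() in the plotting
# loop below, so the two lists must stay in the same order.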
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| gpl-3.0 |
CartoDB/crankshaft | release/python/0.1.0/crankshaft/setup.py | 1 | 1300 |
"""
CartoDB Spatial Analysis Python Library
See:
https://github.com/CartoDB/crankshaft
"""
from setuptools import setup, find_packages
setup(
name='crankshaft',
version='0.1.0',
description='CartoDB Spatial Analysis Python Library',
url='https://github.com/CartoDB/crankshaft',
author='Data Services Team - CartoDB',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Mapping community',
'Topic :: Maps :: Mapping Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
keywords='maps mapping tools spatial analysis geostatistics',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
extras_require={
'dev': ['unittest'],
'test': ['unittest', 'nose', 'mock'],
},
# The choice of component versions is dictated by what's
# provisioned in the production servers.
# IMPORTANT NOTE: please don't change this line. Instead issue a ticket to systems for evaluation.
install_requires=['joblib==0.8.3', 'numpy==1.6.1', 'scipy==0.14.0', 'pysal==1.11.2', 'scikit-learn==0.14.1'],
requires=['pysal', 'numpy', 'sklearn'],
test_suite='test'
)
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/ensemble/__init__.py | 5 | 1055 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble", "RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |